Example 1
def convert(inp_format, inp_loc, out_format, out_loc, output_nodes, ng_backend,
            device_id, backend_optional_params, shape_hints, do_aot):
    """Functional api for converting TF models by inserting ngraph nodes.
    Sample usage:
    from tf2ngraph import convert
    convert('savedmodel', 'test_graph' , 'pbtxt', 'test_graph_ngraph.pbtxt', ['out_node'])
    convert('pbtxt', 'test_graph.pbtxt' , 'pbtxt', 'test_graph_ngraph.pbtxt', ['out_node'])

    Parameters:
    inp_format (string): 'savedmodel', 'pbtxt', 'pb'
    inp_loc (string): Location of input file or folder (in case of savedmodel)
    out_format (string): 'savedmodel', 'pbtxt', 'pb'
    out_loc (string): Location of output file or folder (in case of savedmodel)
    output_nodes (iterable of strings): names of output nodes

    Returns: void
   """
    assert inp_format in allowed_formats['input']
    assert out_format in allowed_formats['output']
    assert ngraph_bridge.is_grappler_enabled()
    input_gdef = get_gdef(inp_format, inp_loc)
    attach_device(input_gdef)
    output_gdef = run_ngraph_grappler_optimizer(input_gdef, output_nodes,
                                                ng_backend, device_id,
                                                backend_optional_params,
                                                shape_hints, do_aot)
    save_model(output_gdef, out_format, out_loc)
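# Note: the docstring's sample calls above omit the later required arguments.
# A full call with all ten parameters might look like this sketch; the
# backend name, device id, and the empty optional-parameter/shape-hint values
# are illustrative assumptions, not values taken from the example:
#
#   from tf2ngraph import convert
#   convert('pbtxt', 'test_graph.pbtxt', 'pbtxt', 'test_graph_ngraph.pbtxt',
#           ['out_node'], 'CPU', '0', {}, [], False)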
Example 2
def run_tensorflow_pytests_from_artifacts(ngraph_tf_src_dir, tf_src_dir,
                                          xml_output):
    root_pwd = os.getcwd()
    ngraph_tf_src_dir = os.path.abspath(ngraph_tf_src_dir)

    # Check to see if we need to apply the patch for Grappler
    import ngraph_bridge
    patch_file_name = "test/python/tensorflow/tf_unittest_ngraph" + (
        "_with_grappler"
        if ngraph_bridge.is_grappler_enabled() else "") + ".patch"
    patch_file = os.path.abspath(
        os.path.join(ngraph_tf_src_dir, patch_file_name))

    # Next, patch TensorFlow so that the tests run using ngraph_bridge
    pwd = os.getcwd()

    # Go to the location of the TensorFlow install directory
    import tensorflow as tf
    tf_dir = tf.sysconfig.get_lib()
    os.chdir(tf_dir + '/python/framework')
    print("CURRENT DIR: " + os.getcwd())

    print("Patching TensorFlow using: %s" % patch_file)
    cmd = subprocess.Popen('patch -N -i ' + patch_file,
                           shell=True,
                           stdout=subprocess.PIPE)
    printed_lines = cmd.communicate()
    # If the patch applies for the first time, cmd.returncode will be 0.
    # If the patch has already been applied, the "Skipping patch" string
    # appears in the output. In all other cases the assertion below fails.
    assert cmd.returncode == 0 or 'patch detected!  Skipping patch' in str(
        printed_lines[0]), "Error applying the patch."
    os.chdir(pwd)

    # Now run the TensorFlow python tests
    test_src_dir = os.path.join(ngraph_tf_src_dir, "test/python/tensorflow")
    test_script = os.path.join(test_src_dir, "tf_unittest_runner.py")

    test_manifest_file = TestEnv.get_test_manifest_filename()
    if not os.path.isabs(test_manifest_file):
        test_manifest_file = os.path.join(test_src_dir, test_manifest_file)

    test_xml_report = './junit_tensorflow_tests.xml'

    import psutil
    num_cores = int(psutil.cpu_count(logical=False))
    print("OMP_NUM_THREADS: %s " % str(num_cores))
    os.environ['OMP_NUM_THREADS'] = str(num_cores)
    os.environ['NGRAPH_TF_DISABLE_DEASSIGN_CLUSTERS'] = '1'

    cmd = [
        "python", test_script, "--tensorflow_path", tf_src_dir,
        "--run_tests_from_file", test_manifest_file
    ]
    if xml_output:
        cmd.extend(["--xml_report", test_xml_report])
    command_executor(cmd, verbose=True)

    os.chdir(root_pwd)
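# The return-code check above relies on GNU patch semantics: with -N, an
# already-applied patch prints a "... patch detected!  Skipping patch"
# message and exits nonzero. A small helper capturing that rule (a sketch;
# the function name is ours, not from the example):
def apply_patch_once(patch_file):
    import subprocess
    proc = subprocess.Popen('patch -N -i ' + patch_file,
                            shell=True, stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    already_applied = 'patch detected!  Skipping patch' in str(out)
    assert proc.returncode == 0 or already_applied, "Error applying the patch."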
Example 3
class TestRewriterConfigBackendSetting(NgraphTest):
    @pytest.mark.skipif(not ngraph_bridge.is_grappler_enabled(),
                        reason='Rewriter config only works for grappler path')
    def test_config_updater_api(self):
        dim1 = 3
        dim2 = 4
        a = tf.compat.v1.placeholder(tf.float32, shape=(dim1, dim2), name='a')
        x = tf.compat.v1.placeholder(tf.float32, shape=(dim1, dim2), name='x')
        b = tf.compat.v1.placeholder(tf.float32, shape=(dim1, dim2), name='y')
        axpy = (a * x) + b

        config = tf.compat.v1.ConfigProto()
        rewriter_options = rewriter_config_pb2.RewriterConfig()
        rewriter_options.meta_optimizer_iterations = (
            rewriter_config_pb2.RewriterConfig.ONE)
        rewriter_options.min_graph_nodes = -1
        ngraph_optimizer = rewriter_options.custom_optimizers.add()
        ngraph_optimizer.name = "ngraph-optimizer"
        config.MergeFrom(
            tf.compat.v1.ConfigProto(graph_options=tf.compat.v1.GraphOptions(
                rewrite_options=rewriter_options)))

        with tf.compat.v1.Session(config=config) as sess:
            outval = sess.run(axpy,
                              feed_dict={
                                  a: 1.5 * np.ones((dim1, dim2)),
                                  b: np.ones((dim1, dim2)),
                                  x: np.ones((dim1, dim2))
                              })
        assert (outval == 2.5 * (np.ones((dim1, dim2)))).all()
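# The same rewriter setup can also be produced by the bridge's own helper,
# ngraph_bridge.update_config (exercised in the TestUpdateConfig examples
# below), which appends the 'ngraph-optimizer' custom optimizer exactly once:
#
#   config = ngraph_bridge.update_config(tf.compat.v1.ConfigProto())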
Example 4
    def with_ngraph(self, l, config=tf.ConfigProto()):
        if ngraph_bridge.is_grappler_enabled():
            rewrite_options = rewriter_config_pb2.RewriterConfig(
                meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.
                ONE,
                min_graph_nodes=-1,
                custom_optimizers=[
                    rewriter_config_pb2.RewriterConfig.CustomGraphOptimizer(
                        name="ngraph-optimizer")
                ])
            config = tf.ConfigProto(graph_options=tf.GraphOptions(
                rewrite_options=rewrite_options))

        ngraph_tf_disable_deassign_clusters = os.environ.pop(
            'NGRAPH_TF_DISABLE_DEASSIGN_CLUSTERS', None)

        os.environ['NGRAPH_TF_DISABLE_DEASSIGN_CLUSTERS'] = '1'
        ngraph_bridge.enable()
        with tf.Session(config=config) as sess:
            retval = l(sess)

        os.environ.pop('NGRAPH_TF_DISABLE_DEASSIGN_CLUSTERS', None)

        if ngraph_tf_disable_deassign_clusters is not None:
            os.environ['NGRAPH_TF_DISABLE_DEASSIGN_CLUSTERS'] = \
                ngraph_tf_disable_deassign_clusters

        return retval
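# The manual pop/set/restore dance around NGRAPH_TF_DISABLE_DEASSIGN_CLUSTERS
# above can be packaged as a context manager; a standard-library-only sketch:
import contextlib
import os

@contextlib.contextmanager
def temp_env_var(name, value):
    old = os.environ.pop(name, None)
    os.environ[name] = value
    try:
        yield
    finally:
        os.environ.pop(name, None)
        if old is not None:
            os.environ[name] = old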
Example 5
    def test_disable_op_2(self, invalid_op_list):
        # This test is disabled for grappler because grappler fails silently and
        # TF continues to run with the unoptimized graph
        # Note, tried setting fail_on_optimizer_errors, but grappler still failed silently
        # TODO: enable this test for grappler as well.
        if (not ngraph_bridge.is_grappler_enabled()):
            ngraph_bridge.set_disabled_ops(invalid_op_list)
            a = tf.placeholder(tf.int32, shape=(5, ))
            b = tf.constant(np.ones((5, )), dtype=tf.int32)
            c = a + b

            def run_test(sess):
                return sess.run(c, feed_dict={a: np.ones((5, ))})

            assert (self.without_ngraph(run_test) == np.ones(5, ) * 2).all()
            try:
                # This test is expected to fail,
                # since all the strings passed to set_disabled_ops have invalid ops in them
                res = self.with_ngraph(run_test)
            except Exception:
                # Clean up
                ngraph_bridge.set_disabled_ops('')
                return
            assert False, 'Had expected test to raise error'
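# The try/except/assert-False tail above is what pytest.raises expresses
# directly; an equivalent sketch (note the cleanup then runs unconditionally):
#
#   with pytest.raises(Exception):
#       self.with_ngraph(run_test)
#   ngraph_bridge.set_disabled_ops('')  # clean up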
Example 6
def run_tensorflow_pytests_from_artifacts(backend, ngraph_tf_src_dir,
                                          tf_src_dir, xml_output):
    root_pwd = os.getcwd()

    ngraph_tf_src_dir = os.path.abspath(ngraph_tf_src_dir)

    # Check to see if we need to apply the patch for Grappler
    import ngraph_bridge
    patch_file_name = "test/python/tensorflow/tf_unittest_ngraph" + (
        "_with_grappler"
        if ngraph_bridge.is_grappler_enabled() else "") + ".patch"
    patch_file = os.path.abspath(
        os.path.join(ngraph_tf_src_dir, patch_file_name))

    # Next, patch TensorFlow so that the tests run using ngraph_bridge
    pwd = os.getcwd()

    # Go to the location of the TensorFlow install directory
    import tensorflow as tf
    tf_dir = tf.sysconfig.get_lib()
    os.chdir(os.path.join(tf_dir, '../'))
    print("CURRENT DIR: " + os.getcwd())

    print("Patching TensorFlow using: %s" % patch_file)
    apply_patch(patch_file)
    os.chdir(pwd)

    # Now run the TensorFlow python tests
    test_src_dir = os.path.join(ngraph_tf_src_dir, "test/python/tensorflow")
    test_script = os.path.join(test_src_dir, "tf_unittest_runner.py")
    if backend is not None and 'GPU' in backend:
        test_manifest_file = os.path.join(test_src_dir,
                                          "python_tests_list_gpu.txt")
    else:
        test_manifest_file = os.path.join(test_src_dir,
                                          "python_tests_list.txt")
    test_xml_report = './junit_tensorflow_tests.xml'

    import psutil
    num_cores = int(psutil.cpu_count(logical=False))
    print("OMP_NUM_THREADS: %s " % str(num_cores))
    os.environ['OMP_NUM_THREADS'] = str(num_cores)
    os.environ['NGRAPH_TF_DISABLE_DEASSIGN_CLUSTERS'] = '1'

    # should this python be sys.executable?
    cmd = [
        "python",
        test_script,
        "--tensorflow_path",
        tf_src_dir,
        "--run_tests_from_file",
        test_manifest_file,
    ]
    if xml_output:
        cmd.extend(["--xml_report", test_xml_report])
    command_executor(cmd)

    os.chdir(root_pwd)
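# Regarding the "should this python be sys.executable?" comment above:
# sys.executable pins the subprocess to the interpreter running this script,
# which matters inside virtualenvs. A sketch of the substitution:
#
#   import sys
#   cmd = [sys.executable, test_script, "--tensorflow_path", tf_src_dir,
#          "--run_tests_from_file", test_manifest_file]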
Example 7
    def test_command_line_api(self, inp_format, inp_loc, out_format,
                              commandline, ng_device):
        # Only run this test when grappler is enabled
        if not ngraph_bridge.is_grappler_enabled():
            return
        assert TestConversionScript.format_and_loc_match(inp_format, inp_loc)
        out_loc = inp_loc.split('.')[0] + '_modified' + (
            '' if out_format == 'savedmodel' else ('.' + out_format))
        try:
            # os.remove for plain files, shutil.rmtree for savedmodel dirs
            (shutil.rmtree, os.remove)[os.path.isfile(out_loc)](out_loc)
        except OSError:
            pass
        conversion_successful = False
        try:
            if commandline:
                # In CI this test is expected to be run out of artifacts/test/python
                command_executor('python ../../tools/tf2ngraph.py --input' +
                                 inp_format + ' ' + inp_loc +
                                 ' --outnodes out_node --output' + out_format +
                                 ' ' + out_loc + ' --ngbackend ' + ng_device)
            else:
                convert(inp_format, inp_loc, out_format, out_loc, ['out_node'],
                        ng_device)
            conversion_successful = True
        finally:
            if not conversion_successful:
                try:
                    (shutil.rmtree, os.remove)[os.path.isfile(out_loc)](out_loc)
                except OSError:
                    pass
        assert conversion_successful

        gdef = get_gdef(out_format, out_loc)
        (shutil.rmtree, os.remove)[os.path.isfile(out_loc)](out_loc)

        with tf.Graph().as_default() as g:
            tf.import_graph_def(gdef, name='')
            # The graph should have exactly one encapsulate
            assert len([
                0 for i in g.get_operations() if i.type == 'NGraphEncapsulate'
            ]) == 1
            x = self.get_tensor(g, "x:0", False)
            y = self.get_tensor(g, "y:0", False)
            out = self.get_tensor(g, "out_node:0", False)

            sess_fn = lambda sess: sess.run(
                [out], feed_dict={i: np.zeros((10,)) for i in [x, y]})

            res1 = self.with_ngraph(sess_fn)
            res2 = self.without_ngraph(sess_fn)

            exp = [0.5 * np.ones((10,))]
            # Note both run on Host (because NgraphEncapsulate can only run on host)
            assert np.isclose(res1, res2).all()
            # Comparing with expected value
            assert np.isclose(res1, exp).all()
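# The (shutil.rmtree, os.remove)[os.path.isfile(out_loc)](out_loc) idiom
# indexes a tuple of callables with a boolean; an explicit helper doing the
# same thing (a sketch, the name is ours):
def remove_path(path):
    import os
    import shutil
    if os.path.isfile(path):
        os.remove(path)      # plain file (pb/pbtxt)
    else:
        shutil.rmtree(path)  # savedmodel directory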
Example 8
def run_resnet50_forward_pass_from_artifacts(ngraph_tf_src_dir, artifact_dir,
                                             batch_size, iterations):

    root_pwd = os.getcwd()
    artifact_dir = os.path.abspath(artifact_dir)
    ngraph_tf_src_dir = os.path.abspath(ngraph_tf_src_dir)
    install_ngraph_bridge(artifact_dir)

    # Now clone the repo and proceed
    call(['git', 'clone', 'https://github.com/tensorflow/benchmarks.git'])
    os.chdir('benchmarks')
    call(['git', 'checkout', '4c7b09ad87bbfc4b1f89650bcee40b3fc5e7dfed'])

    # Check to see if we need to patch the repo for Grappler
    # benchmark_cnn.patch will only work for the CPU backend
    patch_file = os.path.abspath(
        os.path.join(ngraph_tf_src_dir, "test/grappler/benchmark_cnn.patch"))
    import ngraph_bridge
    if ngraph_bridge.is_grappler_enabled():
        print("Patching repo using: %s" % patch_file)
        apply_patch(patch_file)

    os.chdir('scripts/tf_cnn_benchmarks/')

    # junit_script = os.path.abspath('%s/test/ci/junit-wrap.sh' % root_pwd)

    # Update the script by adding `import ngraph_bridge`
    with open('convnet_builder.py', 'a') as outfile:
        call(['echo', 'import ngraph_bridge'], stdout=outfile)

    # Setup the env flags
    import psutil
    num_cores = int(psutil.cpu_count(logical=False))
    print("OMP_NUM_THREADS: %s " % str(num_cores))

    os.environ['OMP_NUM_THREADS'] = str(num_cores)
    os.environ["KMP_AFFINITY"] = 'granularity=fine,compact,1,0'

    cmd = [
        'python',
        'tf_cnn_benchmarks.py',
        '--data_format',
        'NCHW',
        '--num_inter_threads',
        '2',
        '--freeze_when_forward_only=True',
        '--model=resnet50',
        '--batch_size=' + str(batch_size),
        '--num_batches',
        str(iterations),
    ]
    command_executor(cmd, verbose=True)

    os.chdir(root_pwd)
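# Appending `import ngraph_bridge` via an echo subprocess works, but plain
# file I/O avoids the extra process; a sketch:
#
#   with open('convnet_builder.py', 'a') as outfile:
#       outfile.write('\nimport ngraph_bridge\n')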
Example 9
class TestRewriterConfigBackendSetting(NgraphTest):
    @pytest.mark.skipif(not ngraph_bridge.is_grappler_enabled(),
                        reason='Rewriter config only works for grappler path')
    @pytest.mark.parametrize(("backend", ), (
        ('CPU', ),
        ('INTERPRETER', ),
    ))
    def test_config_updater_api(self, backend):
        dim1 = 3
        dim2 = 4
        a = tf.placeholder(tf.float32, shape=(dim1, dim2), name='a')
        x = tf.placeholder(tf.float32, shape=(dim1, dim2), name='x')
        b = tf.placeholder(tf.float32, shape=(dim1, dim2), name='y')
        axpy = (a * x) + b

        config = tf.ConfigProto()
        rewriter_options = rewriter_config_pb2.RewriterConfig()
        rewriter_options.meta_optimizer_iterations = (
            rewriter_config_pb2.RewriterConfig.ONE)
        rewriter_options.min_graph_nodes = -1
        ngraph_optimizer = rewriter_options.custom_optimizers.add()
        ngraph_optimizer.name = "ngraph-optimizer"
        ngraph_optimizer.parameter_map["ngraph_backend"].s = backend.encode()
        ngraph_optimizer.parameter_map["device_id"].s = b'0'
        # TODO: This test will pass if grappler fails silently.
        # Need to do something about that
        backend_extra_params_map = {
            'CPU': {
                'device_config': ''
            },
            'INTERPRETER': {
                'test_echo': '42',
                'hello': '3'
            }
        }
        extra_params = backend_extra_params_map[backend]
        for k in extra_params:
            ngraph_optimizer.parameter_map[k].s = extra_params[k].encode()
        config.MergeFrom(
            tf.ConfigProto(graph_options=tf.GraphOptions(
                rewrite_options=rewriter_options)))

        with tf.Session(config=config) as sess:
            outval = sess.run(axpy,
                              feed_dict={
                                  a: 1.5 * np.ones((dim1, dim2)),
                                  b: np.ones((dim1, dim2)),
                                  x: np.ones((dim1, dim2))
                              })
        assert (outval == 2.5 * (np.ones((dim1, dim2)))).all()
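# Note: parameter_map values are AttrValue protos whose `s` field holds
# bytes, which is why the strings above are encoded before assignment, e.g.:
#
#   ngraph_optimizer.parameter_map["ngraph_backend"].s = b'INTERPRETER'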
Example 10
class TestUpdateConfig(NgraphTest):
    @pytest.mark.skipif(not ngraph_bridge.is_grappler_enabled(),
                        reason='Only for Grappler')
    def test_update_config(self):
        config = tf.compat.v1.ConfigProto()
        config.allow_soft_placement = True
        config_new = ngraph_bridge.update_config(config)
        rewriter_options = config_new.graph_options.rewrite_options
        ngraph_optimizer_name = rewriter_options.custom_optimizers[0].name
        assert ngraph_optimizer_name == 'ngraph-optimizer'
        ngraph_optimizer = rewriter_options.custom_optimizers[0]
        ngraph_optimizer.parameter_map["max_batch_size"].s = b'64'
        ngraph_optimizer.parameter_map["ice_cores"].s = b'12'
        assert config_new.__str__(
        ) == 'allow_soft_placement: true\ngraph_options {\n  rewrite_options {\n    meta_optimizer_iterations: ONE\n    min_graph_nodes: -1\n    custom_optimizers {\n      name: "ngraph-optimizer"\n      parameter_map {\n        key: "device_id"\n        value {\n          s: ""\n        }\n      }\n      parameter_map {\n        key: "ice_cores"\n        value {\n          s: "12"\n        }\n      }\n      parameter_map {\n        key: "max_batch_size"\n        value {\n          s: "64"\n        }\n      }\n      parameter_map {\n        key: "ngraph_backend"\n        value {\n          s: "CPU"\n        }\n      }\n    }\n  }\n}\n'

    @pytest.mark.skipif(not ngraph_bridge.is_grappler_enabled(),
                        reason='Only for Grappler')
    def test_update_config_adds_optimizer_only_once(self):

        # Helper function to count the number of occurrences in a config
        def count_ng_optimizers(config):
            custom_opts = config.graph_options.rewrite_options.custom_optimizers
            count = 0
            for i in range(len(custom_opts)):
                if custom_opts[i].name == 'ngraph-optimizer':
                    count += 1
            return count

        # allow_soft_placement is set just to simulate
        # a real world non-empty initial ConfigProto
        config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
        assert count_ng_optimizers(config) == 0
        config_new_1 = ngraph_bridge.update_config(config)
        config_new_2 = ngraph_bridge.update_config(config_new_1)
        assert count_ng_optimizers(config) == count_ng_optimizers(
            config_new_1) == count_ng_optimizers(config_new_2) == 1
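# Comparing the full textual dump of the proto (as test_update_config does)
# is brittle across proto versions; parsing the expected text back into a
# ConfigProto and comparing messages is an alternative. A sketch, where
# expected_config_text stands for the long literal above:
#
#   from google.protobuf import text_format
#   expected = text_format.Parse(expected_config_text,
#                                tf.compat.v1.ConfigProto())
#   assert config_new == expected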
Example 11
class TestUpdateConfig(NgraphTest):
    @pytest.mark.skipif(not ngraph_bridge.is_grappler_enabled(),
                        reason='Only for Grappler')
    def test_update_config(self):
        config = tf.ConfigProto()
        config.allow_soft_placement = True
        config_new = ngraph_bridge.update_config(config)
        rewriter_options = config_new.graph_options.rewrite_options
        ngraph_optimizer_name = rewriter_options.custom_optimizers[0].name
        assert ngraph_optimizer_name == 'ngraph-optimizer'
        ngraph_optimizer = rewriter_options.custom_optimizers[0]
        ngraph_optimizer.parameter_map["max_batch_size"].s = b'64'
        ngraph_optimizer.parameter_map["ice_cores"].s = b'12'
        assert config_new.__str__(
        ) == 'allow_soft_placement: true\ngraph_options {\n  rewrite_options {\n    meta_optimizer_iterations: ONE\n    min_graph_nodes: -1\n    custom_optimizers {\n      name: "ngraph-optimizer"\n      parameter_map {\n        key: "device_id"\n        value {\n          s: ""\n        }\n      }\n      parameter_map {\n        key: "ice_cores"\n        value {\n          s: "12"\n        }\n      }\n      parameter_map {\n        key: "max_batch_size"\n        value {\n          s: "64"\n        }\n      }\n      parameter_map {\n        key: "ngraph_backend"\n        value {\n          s: "CPU"\n        }\n      }\n    }\n  }\n}\n'
Example 12
def run_resnet50_forward_pass(build_dir):

    root_pwd = os.getcwd()
    build_dir = os.path.abspath(build_dir)
    ngraph_tf_src_dir = os.path.abspath(build_dir + '/../')
    os.chdir(build_dir)

    call(['git', 'clone', 'https://github.com/tensorflow/benchmarks.git'])
    os.chdir('benchmarks')
    call(['git', 'checkout', '4c7b09ad87bbfc4b1f89650bcee40b3fc5e7dfed'])

    junit_script = os.path.abspath('%s/test/ci/junit-wrap.sh' % root_pwd)

    # Check to see if we need to patch the repo for Grappler
    # benchmark_cnn.patch will only work for the CPU backend
    patch_file = os.path.abspath(
        os.path.join(ngraph_tf_src_dir, "test/grappler/benchmark_cnn.patch"))
    import ngraph_bridge
    if ngraph_bridge.is_grappler_enabled():
        print("Patching repo using: %s" % patch_file)
        apply_patch(patch_file)

    os.chdir('scripts/tf_cnn_benchmarks/')
    # Update the script by adding `import ngraph_bridge`
    with open('convnet_builder.py', 'a') as outfile:
        call(['echo', 'import ngraph_bridge'], stdout=outfile)

    # Setup the env flags
    import psutil
    num_cores = int(psutil.cpu_count(logical=False))
    print("OMP_NUM_THREADS: %s " % str(num_cores))

    os.environ['OMP_NUM_THREADS'] = str(num_cores)
    os.environ["KMP_AFFINITY"] = 'granularity=fine,compact,1,0'

    os.environ['JUNIT_WRAP_FILE'] = "%s/junit_inference_test.xml" % build_dir
    os.environ['JUNIT_WRAP_SUITE'] = 'models'
    os.environ['JUNIT_WRAP_TEST'] = 'resnet50-inference'

    # Run inference job
    cmd = [
        junit_script, 'python', 'tf_cnn_benchmarks.py', '--data_format', 'NHWC',
        '--num_inter_threads', '2', '--freeze_when_forward_only=True',
        '--model=resnet50', '--batch_size=1', '--num_batches', '32'
    ]
    command_executor(cmd, verbose=True)
    os.chdir(root_pwd)
Example 13
    def test_MLP(self):
        cwd = os.getcwd()
        os.chdir('../model_level_tests/')
        grappler = ngraph_bridge.is_grappler_enabled()
        varopts = ngraph_bridge.are_variables_enabled()
        if grappler:
            if varopts:
                assert False, "Varopts and grappler do not build together right now"
            else:
                config = "grappler"
        else:
            config = "varopts" if varopts else "default"
        try:
            command_executor(
                "python test_main.py --run_basic_tests --models MLP --ignore_test time --configuration "
                + config)
        finally:
            os.chdir(cwd)
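# The grappler/varopts branching above amounts to a lookup over two booleans,
# with the (True, True) combination unsupported; an equivalent sketch:
#
#   config = {(True, False): 'grappler',
#             (False, True): 'varopts',
#             (False, False): 'default'}[(grappler, varopts)]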
Example 14
def convert(inp_format, inp_loc, out_format, out_loc, output_nodes, ng_backend,
            device_id, backend_optional_params, shape_hints, do_aot,
            save_ng_clusters):
    """Functional api for converting TF models by inserting ngraph nodes.
    Sample usage:
    from tf2ngraph import convert
    convert('savedmodel', 'test_graph' , 'pbtxt', 'test_graph_ngraph.pbtxt', ['out_node'])
    convert('pbtxt', 'test_graph.pbtxt' , 'pbtxt', 'test_graph_ngraph.pbtxt', ['out_node'])

    Parameters:
    inp_format (string): 'savedmodel', 'pbtxt', 'pb'
    inp_loc (string): Location of input file or folder (in case of savedmodel)
    out_format (string): 'savedmodel', 'pbtxt', 'pb'
    out_loc (string): Location of output file or folder (in case of savedmodel)
    output_nodes (iterable of strings): names of output nodes

    Returns: void
   """
    exit_on_error(
        inp_format in allowed_formats['input'], 'Unsupported input format ' +
        inp_format + ". Supported formats: " + str(allowed_formats['input']))
    exit_on_error(
        out_format in allowed_formats['output'], 'Unsupported output format ' +
        out_format + ". Supported formats: " + str(allowed_formats['output']))
    exit_on_error(
        ngraph_bridge.is_grappler_enabled(),
        "ngraph-bridge is not built with grappler enabled, hence tf2ngraph is not supported."
    )
    input_gdef = get_gdef(inp_format, inp_loc)
    attach_device(input_gdef)
    output_gdef = run_ngraph_grappler_optimizer(
        input_gdef, output_nodes, ng_backend, device_id,
        backend_optional_params, shape_hints, do_aot)
    if save_ng_clusters:
        for fn in output_gdef.library.function:
            tf.io.write_graph(
                function_def_to_graph(fn).as_graph_def(),
                '.',
                fn.signature.name + '.pbtxt',
                as_text=True)
    save_model(output_gdef, out_format, out_loc)
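# With save_ng_clusters set, each extracted cluster FunctionDef is written
# out as its own pbtxt named after its signature (e.g. ngraph_cluster_0.pbtxt
# in the current directory). An illustrative call, with assumed
# backend/device values:
#
#   convert('pbtxt', 'test_graph.pbtxt', 'pbtxt', 'test_graph_ngraph.pbtxt',
#           ['out_node'], 'CPU', '0', {}, [], False, True)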
Example 15
class TestUpdateConfig(NgraphTest):
    @pytest.mark.skipif(not ngraph_bridge.is_grappler_enabled(),
                        reason='Only for Grappler')
    def test_update_config_adds_optimizer_only_once(self):

        # Helper function to count the number of occurrences in a config
        def count_ng_optimizers(config):
            custom_opts = config.graph_options.rewrite_options.custom_optimizers
            count = 0
            for i in range(len(custom_opts)):
                if custom_opts[i].name == 'ngraph-optimizer':
                    count += 1
            return count

        # allow_soft_placement is set just to simulate
        # a real world non-empty initial ConfigProto
        config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
        assert count_ng_optimizers(config) == 0
        config_new_1 = ngraph_bridge.update_config(config)
        config_new_2 = ngraph_bridge.update_config(config_new_1)
        assert count_ng_optimizers(config) == count_ng_optimizers(
            config_new_1) == count_ng_optimizers(config_new_2) == 1
Example 16
class Testtf2ngraphShapehints(NgraphTest):

    @pytest.mark.parametrize(
        ('p0_shape', 'p1_shape', 'p0_actual_shape', 'p1_actual_shape',
         'shapehints'),
        (
            ([2, 2], [2, 2], [2, 2], [2, 2], [{}
                                             ]),  # no input needs shape hints
            ([2, 2], [None, 2], [2, 2], [2, 2], [{
                'y': [2, -1]
            }]),  # only 1 input needs shape hints
            (
                [2, None],
                [None, 3],
                [2, 3],
                [2, 3],
                [{
                    'y': [2, -1],
                    'x': [2, 3]  # both inputs need shape hints
                }]),
            ([None, None], [None, None], [5, 1], [5, 1], [{
                'y': [2, 3],
                'x': [2, 3]
            }, {
                'y': [5, 1],
                'x': [5, 1]
            }]),  # 2 executables are compiled
        ))
    @pytest.mark.skipif(
        not ngraph_bridge.is_grappler_enabled(),
        reason="Requires grappler build for tf2ngraph and AOT")
    def test_tf2ngraph_with_shape_hints_0(self, p0_shape, p1_shape,
                                          p0_actual_shape, p1_actual_shape,
                                          shapehints):
        helper(p0_shape, p1_shape, p0_actual_shape, p1_actual_shape, shapehints)

    @pytest.mark.parametrize(
        ('p0_shape', 'p1_shape', 'p0_actual_shape', 'p1_actual_shape',
         'shapehints'),
        (
            ([2, 2], [None, 2], [2, 2], [2, 2], [{
                'y': [2, 3]
            }]),  # conflicting shape hint
            ([2, 2], [None, 2], [2, 2], [2, 2], [{
                'y': [2]
            }]),  # shape hint is of conflicting rank
            ([2, 2], [None, 2], [2, 5], [2, 5], [{
                'y': [2, 2]
            }]),  # During run time bad shapes are passed
            ([2, 2], [None, 2], [2, 2], [2, 2], [{
                'x': [2, -1]
            }]),  # Input y does not have enough hints to concretize it
            ([2, 2], [None, 2], [2, 2], [2, 2], [{
                'y': [2, -1],
                'bogus': [1, 2]
            }]),  # passing a bogus node name
        ))
    @pytest.mark.skipif(
        not ngraph_bridge.is_grappler_enabled(),
        reason="Requires grappler build for tf2ngraph and AOT")
    def test_tf2ngraph_with_shape_hints_1(self, p0_shape, p1_shape,
                                          p0_actual_shape, p1_actual_shape,
                                          shapehints):
        with pytest.raises(Exception):
            helper(p0_shape, p1_shape, p0_actual_shape, p1_actual_shape,
                   shapehints)
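# From the parametrizations above: each shape-hint dict maps an input node
# name to a hint list in which -1 leaves that dimension unknown, and passing
# several dicts yields one precompiled executable per dict. A hypothetical
# hints value:
#
#   shape_hints = [{'x': [2, 3], 'y': [2, -1]}]  # x fully fixed, y partially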
Example 17
def train_mnist_cnn(FLAGS):
    # Config
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False,
                            inter_op_parallelism_threads=1)
    # Enable the custom optimizer using the rewriter config options
    if ngraph_bridge.is_grappler_enabled():
        rewrite_options = rewriter_config_pb2.RewriterConfig(
            meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE,
            custom_optimizers=[
                rewriter_config_pb2.RewriterConfig.CustomGraphOptimizer(
                    name="ngraph-optimizer")
            ])
        config.MergeFrom(
            tf.ConfigProto(graph_options=tf.GraphOptions(
                rewrite_options=rewrite_options)))

    # Note: Additional configuration option to boost performance is to set the
    # following environment for the run:
    # OMP_NUM_THREADS=44 KMP_AFFINITY=granularity=fine,scatter
    # The OMP_NUM_THREADS number should correspond to the number of
    # cores in the system

    # Import data
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    # Create the model
    x = tf.placeholder(tf.float32, [None, 784])

    # Define loss and optimizer
    y_ = tf.placeholder(tf.float32, [None, 10])

    # Build the graph for the deep net
    y_conv, keep_prob = deepnn(x)

    with tf.name_scope('loss'):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_,
                                                                logits=y_conv)
    cross_entropy = tf.reduce_mean(cross_entropy)

    with tf.name_scope('adam_optimizer'):
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
        correct_prediction = tf.cast(correct_prediction, tf.float32)
    accuracy = tf.reduce_mean(correct_prediction)
    tf.summary.scalar('Training accuracy', accuracy)
    tf.summary.scalar('Loss function', cross_entropy)

    graph_location = "/tmp/" + getpass.getuser(
    ) + "/tensorboard-logs/mnist-convnet"
    print('Saving graph to: %s' % graph_location)

    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())

    saver = tf.train.Saver()

    with tf.Session(config=config) as sess:

        sess.run(tf.global_variables_initializer())
        train_loops = FLAGS.train_loop_count
        loss_values = []
        for i in range(train_loops):
            batch = mnist.train.next_batch(FLAGS.batch_size)
            if i % 10 == 0:
                t = time.time()
                train_accuracy = accuracy.eval(feed_dict={
                    x: batch[0],
                    y_: batch[1],
                    keep_prob: 1.0
                })
                #tf.summary.scalar('Training accuracy', train_accuracy)
                print('step %d, training accuracy %g, %g sec to evaluate' %
                      (i, train_accuracy, time.time() - t))
            t = time.time()
            _, summary, loss = sess.run([train_step, merged, cross_entropy],
                                        feed_dict={
                                            x: batch[0],
                                            y_: batch[1],
                                            keep_prob: 0.5
                                        })
            loss_values.append(loss)
            print('step %d, loss %g, %g sec for training step' %
                  (i, loss, time.time() - t))
            train_writer.add_summary(summary, i)

        print("Training finished. Running test")

        num_test_images = FLAGS.test_image_count
        x_test = mnist.test.images[:num_test_images]
        y_test = mnist.test.labels[:num_test_images]

        test_accuracy = accuracy.eval(feed_dict={
            x: x_test,
            y_: y_test,
            keep_prob: 1.0
        })
        print('test accuracy %g' % test_accuracy)
        saver.save(sess, FLAGS.model_dir)
        return loss_values, test_accuracy
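# The performance note near the top of train_mnist_cnn can be applied
# programmatically before the session is created; a sketch following the
# comment's guidance:
#
#   import psutil
#   os.environ['OMP_NUM_THREADS'] = str(psutil.cpu_count(logical=False))
#   os.environ['KMP_AFFINITY'] = 'granularity=fine,scatter'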
Example 18
    def test_command_line_api(self, inp_format, inp_loc, out_node_name,
                              save_ng_clusters, out_format, commandline,
                              ng_device, shape_hints, precompile):
        # Only run this test when grappler is enabled
        if not ngraph_bridge.is_grappler_enabled():
            return

        # Store and unset env variable NGRAPH_TF_BACKEND because the test
        # implicitly tests with different options
        env_var_map = self.store_env_variables(["NGRAPH_TF_BACKEND"])
        self.set_env_variable("NGRAPH_TF_BACKEND", ng_device)
        ngraph_bridge.set_backend("INTERPRETER")

        assert Testtf2ngraph.format_and_loc_match(inp_format, inp_loc)
        out_loc = inp_loc.split('.')[0] + '_modified' + ('' if out_format
                                                         == 'savedmodel' else
                                                         ('.' + out_format))
        try:
            # os.remove for plain files, shutil.rmtree for savedmodel dirs
            (shutil.rmtree, os.remove)[os.path.isfile(out_loc)](out_loc)
        except OSError:
            pass
        conversion_successful = False
        try:
            optional_backend_params = {
                'CPU': {
                    'device_config': '0'
                },
                'INTERPRETER': {
                    'test_echo': '1'
                }
            }[ng_device]
            config_file_name = 'temp_config_file.json'
            Tf2ngraphJson.dump_json(config_file_name, optional_backend_params,
                                    shape_hints)
            if commandline:
                # In CI this test is expected to be run out of artifacts/test/python.
                # When out_node_name is None, no --output_nodes flag is passed:
                # automatic output-node inference prints diagnostic logs, but
                # the tf2ngraph call is still expected to fail.
                command = [
                    'python', '../../tools/tf2ngraph.py',
                    '--input_' + inp_format, inp_loc, '--output_' + out_format,
                    out_loc, '--ng_backend', ng_device, '--config_file',
                    config_file_name
                ]
                if out_node_name is not None:
                    command.extend(['--output_nodes', out_node_name])
                if precompile:
                    command.append('--precompile')
                if save_ng_clusters:
                    command.append('--save_ng_clusters')
                p = Popen(command, stdin=PIPE, stdout=PIPE, stderr=PIPE)
                output, err = p.communicate()
                rc = p.returncode
                if out_node_name is None:
                    assert rc != 0, "Call to tf2ngraph should fail when no output name is provided"
                    return
            else:
                convert(inp_format, inp_loc, out_format, out_loc, ['out_node'],
                        ng_device, "", optional_backend_params, shape_hints,
                        precompile, save_ng_clusters)
            file_present = 'ngraph_cluster_0.pbtxt' in os.listdir()
            assert save_ng_clusters == file_present
            conversion_successful = True
        finally:
            if not conversion_successful:
                try:
                    (shutil.rmtree,
                     os.remove)[os.path.isfile(out_loc)](out_loc)
                    os.remove(config_file_name)
                except OSError:
                    pass
            if save_ng_clusters and 'ngraph_cluster_0.pbtxt' in os.listdir():
                os.remove('ngraph_cluster_0.pbtxt')
        assert conversion_successful

        gdef = get_gdef(out_format, out_loc)
        (shutil.rmtree, os.remove)[os.path.isfile(out_loc)](out_loc)
        os.remove(config_file_name)

        with tf.Graph().as_default() as g:
            tf.import_graph_def(gdef, name='')
            # The graph should have exactly one encapsulate
            assert len([
                0 for i in g.get_operations() if i.type == 'NGraphEncapsulate'
            ]) == 1
            # TODO: check that the encapsulate op has correct backend and extra params attached to it
            x = self.get_tensor(g, "x:0", False)
            y = self.get_tensor(g, "y:0", False)
            out = self.get_tensor(g, "out_node:0", False)

            sess_fn = lambda sess: sess.run(
                [out], feed_dict={i: np.zeros((10, ))
                                  for i in [x, y]})

            res1 = self.with_ngraph(sess_fn)
            res2 = self.without_ngraph(sess_fn)

            exp = [0.5 * np.ones((10, ))]
            # Note both run on Host (because NgraphEncapsulate can only run on host)
            assert np.isclose(res1, res2).all()
            # Comparing with expected value
            assert np.isclose(res1, exp).all()

        # Restore env variable NGRAPH_TF_BACKEND that was stored
        self.restore_env_variables(env_var_map)
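# store_env_variables/set_env_variable/restore_env_variables are NgraphTest
# helpers not shown here; behavior consistent with this test could be as
# simple as the following sketch (one plausible implementation, not the
# bridge's actual code):
#
#   def store_env_variables(self, names):
#       # snapshot and unset, so the test starts from a clean environment
#       return {n: os.environ.pop(n, None) for n in names}
#
#   def restore_env_variables(self, env_var_map):
#       for n, v in env_var_map.items():
#           if v is not None:
#               os.environ[n] = v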
Example 19
def run_resnet50_from_artifacts(ngraph_tf_src_dir, artifact_dir, batch_size,
                                iterations):

    root_pwd = os.getcwd()
    artifact_dir = os.path.abspath(artifact_dir)
    ngraph_tf_src_dir = os.path.abspath(ngraph_tf_src_dir)
    install_ngraph_bridge(artifact_dir)

    # Now clone the repo and proceed
    call(['git', 'clone', 'https://github.com/tensorflow/benchmarks.git'])
    os.chdir('benchmarks')
    call(['git', 'checkout', '4c7b09ad87bbfc4b1f89650bcee40b3fc5e7dfed'])

    # Check to see if we need to patch the repo for Grappler
    patch_file = os.path.abspath(
        os.path.join(ngraph_tf_src_dir, "test/grappler/benchmark_cnn.patch"))
    import ngraph_bridge
    if ngraph_bridge.is_grappler_enabled():
        print("Patching repo using: %s" % patch_file)
        apply_patch(patch_file)

    os.chdir('scripts/tf_cnn_benchmarks/')

    # junit_script = os.path.abspath('%s/test/ci/junit-wrap.sh' % root_pwd)

    # Update the script by adding `import ngraph_bridge`
    with open('convnet_builder.py', 'a') as outfile:
        call(['echo', 'import ngraph_bridge'], stdout=outfile)

    # Setup the env flags
    import psutil
    num_cores = int(psutil.cpu_count(logical=False))
    print("OMP_NUM_THREADS: %s " % str(num_cores))

    os.environ['OMP_NUM_THREADS'] = str(num_cores)
    os.environ["KMP_AFFINITY"] = 'granularity=fine,compact,1,0'

    # Delete the temporary model save directory
    model_save_dir = os.getcwd() + '/modelsavepath'
    if os.path.exists(model_save_dir) and os.path.isdir(model_save_dir):
        shutil.rmtree(model_save_dir)

    eval_eventlog_dir = os.getcwd() + '/eval_eventlog_dir'
    if os.path.exists(eval_eventlog_dir) and os.path.isdir(eval_eventlog_dir):
        shutil.rmtree(eval_eventlog_dir)

    # os.environ['JUNIT_WRAP_FILE'] = "%s/junit_training_test.xml" % build_dir
    # os.environ['JUNIT_WRAP_SUITE'] = 'models'
    # os.environ['JUNIT_WRAP_TEST'] = 'resnet50-training'

    # Run training job
    # cmd = [
    #     junit_script, 'python', 'tf_cnn_benchmarks.py', '--data_format',
    #     'NCHW', '--num_inter_threads', '1', '--train_dir=' + model_save_dir,
    #     '--num_batches', '10', '--model=resnet50', '--batch_size=128'
    # ]

    cmd = [
        'python', 'tf_cnn_benchmarks.py', '--data_format', 'NCHW',
        '--num_inter_threads', '1', '--train_dir=' + model_save_dir,
        '--num_batches',
        str(iterations), '--model=resnet50', '--batch_size=' + str(batch_size),
        '--eval_dir=' + eval_eventlog_dir
    ]
    command_executor(cmd, verbose=True)

    # os.environ['JUNIT_WRAP_FILE'] = "%s/junit_inference_test.xml" % build_dir
    # os.environ['JUNIT_WRAP_SUITE'] = 'models'
    # os.environ['JUNIT_WRAP_TEST'] = 'resnet50-inference'

    # Run inference job
    # cmd = [
    #     junit_script, 'python', 'tf_cnn_benchmarks.py', '--data_format',
    #     'NCHW', '--num_inter_threads', '1', '--train_dir=' + model_save_dir,
    #     '--model=resnet50', '--batch_size=128', '--num_batches', '10', '--eval'
    # ]
    cmd = [
        'python', 'tf_cnn_benchmarks.py', '--data_format', 'NCHW',
        '--num_inter_threads', '1', '--train_dir=' + model_save_dir,
        '--model=resnet50', '--batch_size=' + str(batch_size), '--num_batches',
        str(iterations), '--eval', '--eval_dir=' + eval_eventlog_dir
    ]
    command_executor(cmd, verbose=True)

    os.chdir(root_pwd)
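# The exists/isdir guards before shutil.rmtree above can be collapsed, since
# shutil.rmtree accepts ignore_errors; a sketch:
#
#   shutil.rmtree(model_save_dir, ignore_errors=True)
#   shutil.rmtree(eval_eventlog_dir, ignore_errors=True)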
Example 20
def main():
    '''
    Builds TensorFlow, ngraph, and ngraph-tf for python 3
    '''

    # Component versions
    ngraph_version = "effcc47d39de1d5e662d6b78a8d42675ecf7815d"  # Scott Cyphers CropAndResize op
    tf_version = "v1.14.0"

    # Command line parser options
    parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)

    parser.add_argument(
        '--debug_build',
        help="Builds a debug version of the nGraph components\n",
        action="store_true")

    parser.add_argument(
        '--verbose_build',
        help="Display verbose error messages\n",
        action="store_true")

    parser.add_argument(
        '--target_arch',
        help=
        "Architecture flag to use (e.g., haswell, core-avx2 etc. Default \'native\'\n",
    )

    parser.add_argument(
        '--build_gpu_backend',
        help=
        "nGraph backends will include nVidia GPU. Use: NGRAPH_TF_BACKEND=GPU\n"
        "Note: You need to have CUDA headers and libraries available on the build system.\n",
        action="store_true")

    parser.add_argument(
        '--build_plaidml_backend',
        help=
        "nGraph backends will include PlaidML backend. Use: NGRAPH_TF_BACKEND=PLAIDML\n",
        action="store_true")

    parser.add_argument(
        '--build_intelgpu_backend',
        help=
        "nGraph backends will include Intel GPU bckend. Use: NGRAPH_TF_BACKEND=INTELGPU\n",
        action="store_true")

    parser.add_argument(
        '--use_prebuilt_tensorflow',
        help="Skip building TensorFlow and use downloaded version.\n" +
        "Note that in this case C++ unit tests won't be build for nGraph-TF bridge",
        action="store_true")

    parser.add_argument(
        '--distributed_build',
        type=str,
        help="Builds a distributed version of the nGraph components\n",
        action="store")

    parser.add_argument(
        '--enable_variables_and_optimizers',
        help=
        "Ops like variable and optimizers are supported by nGraph in this version of the bridge\n",
        action="store_true")

    parser.add_argument(
        '--use_grappler_optimizer',
        help="Use Grappler optimizer instead of the optimization passes\n",
        action="store_true")

    parser.add_argument(
        '--artifacts_dir',
        type=str,
        help="Copy the artifacts to the given directory\n",
        action="store")

    parser.add_argument(
        '--ngraph_src_dir',
        type=str,
        help=
        "Local nGraph source directory to use. Overrides --ngraph_version.\n",
        action="store")

    parser.add_argument(
        '--ngraph_version',
        type=str,
        help="nGraph version to use. Overridden by --ngraph_src_dir. (Default: "
        + ngraph_version + ")\n",
        action="store")

    parser.add_argument(
        '--use_tensorflow_from_location',
        help=
        "Use TensorFlow from a directory where it was already built and stored.\n"
        "This location is expected to be populated by build_tf.py\n",
        action="store",
        default='')

    parser.add_argument(
        '--use_ngraph_staticlibs',
        help="Builds and links ngraph statically\n",
        action="store_true")

    # Done with the options. Now parse the commandline
    arguments = parser.parse_args()

    if (arguments.debug_build):
        print("Building in DEBUG mode\n")

    verbosity = False
    if (arguments.verbose_build):
        print("Building in with VERBOSE output messages\n")
        verbosity = True

    #-------------------------------
    # Recipe
    #-------------------------------

    version_check(arguments.use_prebuilt_tensorflow)

    # Default directories
    build_dir = 'build_cmake'

    assert not (
        arguments.use_tensorflow_from_location != '' and
        arguments.use_prebuilt_tensorflow
    ), ("\"use_tensorflow_from_location\" and \"use_prebuilt_tensorflow\" "
        "cannot be used together.")

    if arguments.use_tensorflow_from_location != '':
        # Check if the prebuilt folder has necessary files
        assert os.path.isdir(
            arguments.use_tensorflow_from_location
        ), "Prebuilt TF path " + arguments.use_tensorflow_from_location + " does not exist"
        loc = arguments.use_tensorflow_from_location + '/artifacts/tensorflow'
        assert os.path.isdir(
            loc), "Could not find artifacts/tensorflow directory"
        found_whl = False
        found_libtf_fw = False
        found_libtf_cc = False
        for i in os.listdir(loc):
            if '.whl' in i:
                found_whl = True
            if 'libtensorflow_cc' in i:
                found_libtf_cc = True
            if 'libtensorflow_framework' in i:
                found_libtf_fw = True
        assert found_whl, "Did not find TF whl file"
        assert found_libtf_fw, "Did not find libtensorflow_framework"
        assert found_libtf_cc, "Did not find libtensorflow_cc"

    try:
        os.makedirs(build_dir)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(build_dir):
            pass
        else:
            raise
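    # Note: on Python >= 3.2 this is equivalent to
    # os.makedirs(build_dir, exist_ok=True).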

    pwd = os.getcwd()
    ngraph_tf_src_dir = os.path.abspath(pwd)
    build_dir_abs = os.path.abspath(build_dir)
    os.chdir(build_dir)

    venv_dir = 'venv-tf-py3'
    artifacts_location = 'artifacts'
    if arguments.artifacts_dir:
        artifacts_location = os.path.abspath(arguments.artifacts_dir)

    artifacts_location = os.path.abspath(artifacts_location)
    print("ARTIFACTS location: " + artifacts_location)

    # Create the artifacts directory if it doesn't exist
    if not os.path.isdir(artifacts_location):
        os.mkdir(artifacts_location)

    #install virtualenv
    install_virtual_env(venv_dir)

    # Load the virtual env
    load_venv(venv_dir)

    # Setup the virtual env
    setup_venv(venv_dir)

    target_arch = 'native'
    if (arguments.target_arch):
        target_arch = arguments.target_arch

    print("Target Arch: %s" % target_arch)

    # The cxx_abi flag is translated to _GLIBCXX_USE_CXX11_ABI
    # For gcc 4.8 - this flag is set to 0 and newer ones, this is set to 1
    # The specific value is determined from the TensorFlow build
    # Normally the shipped TensorFlow is built with gcc 4.8 and thus this
    # flag is set to 0
    cxx_abi = "0"

    if arguments.use_tensorflow_from_location != "":
        # Some asserts to make sure the directory structure of
        # use_tensorflow_from_location is correct. The location
        # should have: ./artifacts/tensorflow, which is expected
        # to contain one TF whl file, framework.so and cc.so
        print("Using TensorFlow from " + arguments.use_tensorflow_from_location)
        # The tf whl should be in use_tensorflow_from_location/artifacts/tensorflow
        tf_whl_loc = os.path.abspath(arguments.use_tensorflow_from_location +
                                     '/artifacts/tensorflow')
        possible_whl = [i for i in os.listdir(tf_whl_loc) if '.whl' in i]
        assert len(possible_whl) == 1, \
            "Expected one TF whl file, but found " + str(len(possible_whl))
        # Make sure there is exactly 1 TF whl
        tf_whl = os.path.abspath(tf_whl_loc + '/' + possible_whl[0])
        assert os.path.isfile(tf_whl), "Did not find " + tf_whl
        # Install the found TF whl file
        command_executor(["pip", "install", "-U", tf_whl])
        cxx_abi = get_tf_cxxabi()
        cwd = os.getcwd()
        os.chdir(tf_whl_loc)
        tf_in_artifacts = os.path.join(
            os.path.abspath(artifacts_location), "tensorflow")
        if os.path.isdir(tf_in_artifacts):
            print("TensorFlow already exists in artifacts. Using that")
        else:
            os.mkdir(tf_in_artifacts)
            # This function copies the .so files from
            # use_tensorflow_from_location/artifacts/tensorflow to
            # artifacts/tensorflow
            copy_tf_to_artifacts(tf_in_artifacts, tf_whl_loc)
        os.chdir(cwd)
    else:
        if arguments.use_prebuilt_tensorflow:
            print("Using existing TensorFlow")
            # First download the source. This will create the tensorflow directory as needed
            tf_src_dir = os.path.join(artifacts_location, "tensorflow")
            print("TF_SRC_DIR: ", tf_src_dir)
            # Download
            pwd_now = os.getcwd()
            os.chdir(artifacts_location)
            print("DOWNLOADING TF: PWD", os.getcwd())
            download_repo("tensorflow",
                          "https://github.com/tensorflow/tensorflow.git",
                          tf_version)
            os.chdir(pwd_now)

            # Next install the tensorflow python package
            command_executor(
                ["pip", "install", "-U", "tensorflow==" + tf_version])
            cxx_abi = get_tf_cxxabi()

            # Copy the libtensorflow_framework.so to the artifacts so that
            # we can run c++ tests from that location later
            tf_fmwk_lib_name = 'libtensorflow_framework.so.1'
            if (platform.system() == 'Darwin'):
                tf_fmwk_lib_name = 'libtensorflow_framework.1.dylib'
            import tensorflow as tf
            tf_lib_dir = tf.sysconfig.get_lib()
            tf_lib_file = os.path.join(tf_lib_dir, tf_fmwk_lib_name)
            print("SYSCFG LIB: ", tf_lib_file)

            dst_dir = os.path.join(artifacts_location, "tensorflow")
            if not os.path.isdir(dst_dir):
                os.mkdir(dst_dir)

            dst = os.path.join(dst_dir, tf_fmwk_lib_name)
            shutil.copyfile(tf_lib_file, dst)

            # Now build the libtensorflow_cc.so - the C++ library
            build_tensorflow_cc(tf_src_dir, artifacts_location, target_arch,
                                verbosity)

        else:
            print("Building TensorFlow from source")
            # Download TensorFlow
            download_repo("tensorflow",
                          "https://github.com/tensorflow/tensorflow.git",
                          tf_version)
            tf_src_dir = os.path.join(os.getcwd(), "tensorflow")
            # Build TensorFlow
            build_tensorflow(venv_dir, "tensorflow", artifacts_location,
                             target_arch, verbosity)

            # Now build the libtensorflow_cc.so - the C++ library
            build_tensorflow_cc(tf_src_dir, artifacts_location, target_arch,
                                verbosity)

            # Install tensorflow to our own virtual env
            # Note that if gcc 4.8 is used for building TensorFlow this flag
            # will be 0
            cxx_abi = install_tensorflow(venv_dir, artifacts_location)

    if str(cxx_abi) == "0":
        if not arguments.use_prebuilt_tensorflow:
            raise Exception(
                "Expected cxx_abi to be 0 when using 'use_prebuilt_tensorflow'")

    # Download nGraph if required.
    ngraph_src_dir = './ngraph'
    if arguments.ngraph_src_dir:
        ngraph_src_dir = arguments.ngraph_src_dir

        print("Using local nGraph source in directory ", ngraph_src_dir)
    else:
        if arguments.ngraph_version:
            ngraph_version = arguments.ngraph_version

        print("nGraph Version: ", ngraph_version)
        download_repo("ngraph", "https://github.com/NervanaSystems/ngraph.git",
                      ngraph_version)

    # Now build nGraph
    ngraph_cmake_flags = [
        "-DNGRAPH_INSTALL_PREFIX=" + artifacts_location,
        "-DNGRAPH_USE_CXX_ABI=" + cxx_abi, "-DNGRAPH_DEX_ONLY=TRUE",
        "-DNGRAPH_DEBUG_ENABLE=NO", "-DNGRAPH_UNIT_TEST_ENABLE=NO",
        "-DNGRAPH_TARGET_ARCH=" + target_arch,
        "-DNGRAPH_TUNE_ARCH=" + target_arch, "-DNGRAPH_TBB_ENABLE=FALSE"
    ]

    if arguments.use_ngraph_staticlibs:
        ngraph_cmake_flags.extend(["-DNGRAPH_STATIC_LIB_ENABLE=TRUE"])
        ngraph_cmake_flags.extend(["-DNGRAPH_CPU_STATIC_LIB_ENABLE=TRUE"])
        ngraph_cmake_flags.extend(
            ["-DNGRAPH_INTERPRETER_STATIC_LIB_ENABLE=TRUE"])

    if arguments.debug_build:
        ngraph_cmake_flags.extend(["-DCMAKE_BUILD_TYPE=Debug"])

    if (arguments.distributed_build == "OMPI"):
        ngraph_cmake_flags.extend(["-DNGRAPH_DISTRIBUTED_ENABLE=OMPI"])
    elif (arguments.distributed_build == "MLSL"):
        ngraph_cmake_flags.extend(["-DNGRAPH_DISTRIBUTED_ENABLE=MLSL"])
    else:
        ngraph_cmake_flags.extend(["-DNGRAPH_DISTRIBUTED_ENABLE=OFF"])

    if arguments.build_plaidml_backend:
        command_executor(["pip", "install", "-U", "plaidML"])

    flag_string_map = {True: 'YES', False: 'NO'}
    ngraph_cmake_flags.extend([
        "-DNGRAPH_TOOLS_ENABLE=" +
        flag_string_map[platform.system() != 'Darwin']
    ])
    ngraph_cmake_flags.extend(
        ["-DNGRAPH_GPU_ENABLE=" + flag_string_map[arguments.build_gpu_backend]])
    ngraph_cmake_flags.extend([
        "-DNGRAPH_PLAIDML_ENABLE=" +
        flag_string_map[arguments.build_plaidml_backend]
    ])
    ngraph_cmake_flags.extend([
        "-DNGRAPH_INTELGPU_ENABLE=" +
        flag_string_map[arguments.build_intelgpu_backend]
    ])

    build_ngraph(build_dir, ngraph_src_dir, ngraph_cmake_flags, verbosity)

    ngraph_tf_cmake_flags = [
        "-DNGRAPH_TF_INSTALL_PREFIX=" + artifacts_location,
        "-DUSE_PRE_BUILT_NGRAPH=ON",
        "-DUNIT_TEST_ENABLE=ON",
        "-DNGRAPH_TARGET_ARCH=" + target_arch,
        "-DNGRAPH_TUNE_ARCH=" + target_arch,
        "-DNGRAPH_ARTIFACTS_DIR=" + artifacts_location,
    ]

    if (arguments.use_ngraph_staticlibs):
        ngraph_tf_cmake_flags.extend(["-DNGRAPH_BRIDGE_STATIC_LIB_ENABLE=TRUE"])
    if (arguments.debug_build):
        ngraph_tf_cmake_flags.extend(["-DCMAKE_BUILD_TYPE=Debug"])

    if not arguments.use_prebuilt_tensorflow:
        if arguments.use_tensorflow_from_location:
            ngraph_tf_cmake_flags.extend([
                "-DTF_SRC_DIR=" + os.path.abspath(
                    arguments.use_tensorflow_from_location + '/tensorflow')
            ])
        else:
            ngraph_tf_cmake_flags.extend(["-DTF_SRC_DIR=" + tf_src_dir])
        ngraph_tf_cmake_flags.extend([
            "-DUNIT_TEST_TF_CC_DIR=" + os.path.join(artifacts_location,
                                                    "tensorflow")
        ])

    # Next build CMAKE options for the bridge. Note: the TF_SRC_DIR and
    # UNIT_TEST_TF_CC_DIR flags below repeat the ones added above when
    # TensorFlow was built from source; the later values simply append
    # to the flag list.
    if arguments.use_tensorflow_from_location:
        ngraph_tf_cmake_flags.extend([
            "-DTF_SRC_DIR=" + os.path.abspath(
                arguments.use_tensorflow_from_location + '/tensorflow')
        ])
    else:
        print("TF_SRC_DIR: ", tf_src_dir)
        ngraph_tf_cmake_flags.extend(["-DTF_SRC_DIR=" + tf_src_dir])

    ngraph_tf_cmake_flags.extend([
        "-DUNIT_TEST_TF_CC_DIR=" + os.path.join(artifacts_location,
                                                "tensorflow")
    ])

    if ((arguments.distributed_build == "OMPI") or
        (arguments.distributed_build == "MLSL")):
        ngraph_tf_cmake_flags.extend(["-DNGRAPH_DISTRIBUTED_ENABLE=TRUE"])
    else:
        ngraph_tf_cmake_flags.extend(["-DNGRAPH_DISTRIBUTED_ENABLE=FALSE"])

    ngraph_tf_cmake_flags.extend([
        "-DNGRAPH_TF_ENABLE_VARIABLES_AND_OPTIMIZERS=" +
        flag_string_map[arguments.enable_variables_and_optimizers]
    ])

    ngraph_tf_cmake_flags.extend([
        "-DNGRAPH_TF_USE_GRAPPLER_OPTIMIZER=" +
        flag_string_map[arguments.use_grappler_optimizer]
    ])

    # Now build the bridge
    ng_tf_whl = build_ngraph_tf(build_dir, artifacts_location,
                                ngraph_tf_src_dir, venv_dir,
                                ngraph_tf_cmake_flags, verbosity)

    # Make sure that the ngraph bridge whl is present in the artifacts directory
    if not os.path.isfile(os.path.join(artifacts_location, ng_tf_whl)):
        raise Exception("Cannot locate nGraph whl in the artifacts location")

    print("SUCCESSFULLY generated wheel: %s" % ng_tf_whl)
    print("PWD: " + os.getcwd())

    # Copy the TensorFlow Python code tree to the artifacts directory so that
    # it can be used for running TensorFlow Python unit tests.
    #
    # There are three cases:
    # 1. use_tensorflow_from_location is defined: copy from that location.
    # 2. It is not defined but use_prebuilt_tensorflow is: the entire
    #    tensorflow source was already copied to the artifacts, so only a
    #    symbolic link needs to be created.
    # 3. Neither is defined: copy from the local build directory.
    if arguments.use_tensorflow_from_location == '':
        if arguments.use_prebuilt_tensorflow:
            # Case 2
            base_dir = None
        else:
            # Case 3
            base_dir = build_dir_abs
    else:
        # Case 1
        base_dir = arguments.use_tensorflow_from_location

    if base_dir is not None:
        command_executor([
            'cp', '-r', base_dir + '/tensorflow/tensorflow/python',
            os.path.join(artifacts_location, "tensorflow")
        ],
                         verbose=True)
    else:
        # Create a symlink to the TensorFlow Python tree inside artifacts
        link_src = os.path.join(artifacts_location,
                                "tensorflow/tensorflow/python")
        link_dst = os.path.join(artifacts_location, "tensorflow/python")
        command_executor(['ln', '-sf', link_src, link_dst], verbose=True)
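    # At this point the artifacts tree is expected to contain, among other
    # things (illustrative layout):
    #   artifacts/tensorflow/python   - the TF python tree (copied or linked)
    #   artifacts/<ng_tf_whl>         - the nGraph bridge wheel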

    # Run a quick test
    install_ngraph_tf(venv_dir, os.path.join(artifacts_location, ng_tf_whl))

    if arguments.use_grappler_optimizer:
        import tensorflow as tf
        import ngraph_bridge
        if not ngraph_bridge.is_grappler_enabled():
            raise Exception(
                "Build failed: 'use_grappler_optimizer' specified but not used")

    print('\033[1;32mBuild successful\033[0m')
    os.chdir(pwd)
Example No. 21
0
def main():
    '''
    Builds TensorFlow, ngraph, and ngraph-tf for python 3
    '''

    # Component versions
    ngraph_version = "94456090176ad6abda633b496b89cc16157ed4b0"  #add codegen support to cpu backend (#4679) ,May 26
    tf_version = "v2.2.0"

    # Command line parser options
    parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)

    parser.add_argument(
        '--debug_build',
        help="Builds a debug version of the nGraph components\n",
        action="store_true")

    parser.add_argument('--verbose_build',
                        help="Display verbose error messages\n",
                        action="store_true")

    parser.add_argument(
        '--target_arch',
        help=
        "Architecture flag to use (e.g., haswell, core-avx2, etc.). Default \'native\'\n",
    )

    parser.add_argument('--build_openvino_backend',
                        help="Build OpenVINO backend\n",
                        action="store_true")

    parser.add_argument(
        '--use_prebuilt_openvino',
        type=str,
        help=
        "Skip building OpenVINO and use the pre-built version from the specified directory.\n",
        action="store")

    parser.add_argument(
        '--use_prebuilt_tensorflow',
        type=str,
        help=
        "Skip building TensorFlow and use the specified prebuilt version.\n" +
        "If prebuilt version isn't specified, TF version " + tf_version +
        " will be used.\n" +
        "Note: in this case C++ API, unit tests and examples won't be build for nGraph-TF bridge",
        const=tf_version,
        default='',
        nargs='?',
        action="store")

    parser.add_argument(
        '--use_prebuilt_ngraph',
        type=str,
        help=
        "Skip building ngraph and use pre-built version from the specified directory.\n",
        action="store")

    parser.add_argument(
        '--use_grappler_optimizer',
        help="Use Grappler optimizer instead of the optimization passes\n",
        action="store_true")

    parser.add_argument('--artifacts_dir',
                        type=str,
                        help="Copy the artifacts to the given directory\n",
                        action="store")

    parser.add_argument(
        '--ngraph_src_dir',
        type=str,
        help=
        "Local nGraph source directory to use. Overrides --ngraph_version.\n",
        action="store")

    parser.add_argument(
        '--ngraph_version',
        type=str,
        help="nGraph version to use. Overridden by --ngraph_src_dir. (Default: "
        + ngraph_version + ")\n",
        action="store")

    parser.add_argument(
        '--use_tensorflow_from_location',
        help=
        "Use TensorFlow from a directory where it was already built and stored.\n"
        "This location is expected to be populated by build_tf.py\n",
        action="store",
        default='')

    parser.add_argument('--use_ngraph_staticlibs',
                        help="Builds and links ngraph statically\n",
                        action="store_true")

    parser.add_argument('--disable_cpp_api',
                        help="Disables C++ API, unit tests and examples\n",
                        action="store_true")

    # Done with the options. Now parse the commandline
    arguments = parser.parse_args()

    if (arguments.debug_build):
        print("Building in DEBUG mode\n")

    verbosity = False
    if (arguments.verbose_build):
        print("Building in with VERBOSE output messages\n")
        verbosity = True

    #-------------------------------
    # Recipe
    #-------------------------------

    # Default directories
    build_dir = 'build_cmake'

    assert not (
        arguments.use_tensorflow_from_location != ''
        and arguments.use_prebuilt_tensorflow != ''
    ), ("\"use_tensorflow_from_location\" and \"use_prebuilt_tensorflow\" "
        "cannot be used together.")

    version_check((arguments.use_prebuilt_tensorflow != ''),
                  (arguments.use_tensorflow_from_location != ''),
                  arguments.disable_cpp_api)

    if arguments.use_tensorflow_from_location != '':
        # Check if the prebuilt folder has necessary files
        assert os.path.isdir(
            arguments.use_tensorflow_from_location
        ), "Prebuilt TF path " + arguments.use_tensorflow_from_location + " does not exist"
        loc = arguments.use_tensorflow_from_location + '/artifacts/tensorflow'
        assert os.path.isdir(
            loc), "Could not find artifacts/tensorflow directory"
        found_whl = False
        found_libtf_fw = False
        found_libtf_cc = False
        for i in os.listdir(loc):
            if '.whl' in i:
                found_whl = True
            if 'libtensorflow_cc' in i:
                found_libtf_cc = True
            if 'libtensorflow_framework' in i:
                found_libtf_fw = True
        assert found_whl, "Did not find TF whl file"
        assert found_libtf_fw, "Did not find libtensorflow_framework"
        assert found_libtf_cc, "Did not find libtensorflow_cc"

    try:
        os.makedirs(build_dir)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(build_dir):
            pass
        else:
            raise

    pwd = os.getcwd()
    ngraph_tf_src_dir = os.path.abspath(pwd)
    print("NGTF SRC DIR: " + ngraph_tf_src_dir)
    build_dir_abs = os.path.abspath(build_dir)
    os.chdir(build_dir)

    venv_dir = 'venv-tf-py3'
    artifacts_location = 'artifacts'
    if arguments.artifacts_dir:
        artifacts_location = os.path.abspath(arguments.artifacts_dir)

    artifacts_location = os.path.abspath(artifacts_location)
    print("ARTIFACTS location: " + artifacts_location)

    # Create the artifacts directory if it doesn't exist
    if not os.path.isdir(artifacts_location):
        os.mkdir(artifacts_location)

    # Install virtualenv
    install_virtual_env(venv_dir)

    # Load the virtual env
    load_venv(venv_dir)

    # Setup the virtual env
    setup_venv(venv_dir)

    target_arch = 'native'
    if (arguments.target_arch):
        target_arch = arguments.target_arch

    print("Target Arch: %s" % target_arch)

    if arguments.use_prebuilt_tensorflow != '':
        tf_version = arguments.use_prebuilt_tensorflow

    # The cxx_abi flag is translated to _GLIBCXX_USE_CXX11_ABI
    # For gcc older than 5.3, this flag is set to 0 and for newer ones,
    # this is set to 1
    # The specific value is determined from the TensorFlow build
    # Normally the shipped TensorFlow going forward is built with gcc 7.3
    # and thus this flag is set to 1
    cxx_abi = "1"

    if arguments.use_tensorflow_from_location != "":
        # Some asserts to make sure the directory structure of
        # use_tensorflow_from_location is correct. The location
        # should have: ./artifacts/tensorflow, which is expected
        # to contain one TF whl file, framework.so and cc.so
        print("Using TensorFlow from " +
              arguments.use_tensorflow_from_location)
        # The tf whl should be in use_tensorflow_from_location/artifacts/tensorflow
        tf_whl_loc = os.path.abspath(arguments.use_tensorflow_from_location +
                                     '/artifacts/tensorflow')
        possible_whl = [i for i in os.listdir(tf_whl_loc) if '.whl' in i]
        # Make sure there is exactly one TF whl
        assert len(possible_whl) == 1, \
            "Expected one TF whl file, but found " + str(len(possible_whl))
        tf_whl = os.path.abspath(tf_whl_loc + '/' + possible_whl[0])
        assert os.path.isfile(tf_whl), "Did not find " + tf_whl
        # Install the found TF whl file
        command_executor(["pip", "install", "-U", tf_whl])
        cxx_abi = get_tf_cxxabi()
        cwd = os.getcwd()
        os.chdir(tf_whl_loc)
        tf_in_artifacts = os.path.join(os.path.abspath(artifacts_location),
                                       "tensorflow")
        if not os.path.isdir(tf_in_artifacts):
            os.mkdir(tf_in_artifacts)
        # This function copies the .so files from
        # use_tensorflow_from_location/artifacts/tensorflow to
        # artifacts/tensorflow
        copy_tf_to_artifacts(tf_version, tf_in_artifacts, tf_whl_loc)
        os.chdir(cwd)
    else:
        if arguments.use_prebuilt_tensorflow != '':
            print("Using existing TensorFlow version", tf_version)
            # Install TensorFlow
            command_executor(
                ["pip", "install", "-U", "tensorflow==" + tf_version])
            cxx_abi = get_tf_cxxabi()

            if not arguments.disable_cpp_api:
                tf_src_dir = os.path.join(artifacts_location, "tensorflow")
                print("TF_SRC_DIR: ", tf_src_dir)
                # Download TF source
                pwd_now = os.getcwd()
                os.chdir(artifacts_location)
                print("DOWNLOADING TF: PWD", os.getcwd())
                download_repo("tensorflow",
                              "https://github.com/tensorflow/tensorflow.git",
                              tf_version)
                os.chdir(pwd_now)

                # Now build the libtensorflow_cc.so - the C++ library
                build_tensorflow_cc(tf_version, tf_src_dir, artifacts_location,
                                    target_arch, verbosity)
        else:
            print("Building TensorFlow from source")
            # Download TensorFlow
            download_repo("tensorflow",
                          "https://github.com/tensorflow/tensorflow.git",
                          tf_version)
            tf_src_dir = os.path.join(os.getcwd(), "tensorflow")
            print("TF_SRC_DIR: ", tf_src_dir)

            # Build TensorFlow
            build_tensorflow(tf_version, "tensorflow", artifacts_location,
                             target_arch, verbosity)

            # Now build the libtensorflow_cc.so - the C++ library
            build_tensorflow_cc(tf_version, tf_src_dir, artifacts_location,
                                target_arch, verbosity)

            # Install tensorflow to our own virtual env
            # Note that if gcc 7.3 is used for building TensorFlow this flag
            # will be 1
            cxx_abi = install_tensorflow(venv_dir, artifacts_location)

        # Finally, copy the libtensorflow_framework.so to the artifacts so that
        # we can run c++ tests from that location later
        tf_fmwk_lib_name = 'libtensorflow_framework.so.2'
        if (platform.system() == 'Darwin'):
            tf_fmwk_lib_name = 'libtensorflow_framework.2.dylib'
        import tensorflow as tf
        tf_lib_dir = tf.sysconfig.get_lib()
        tf_lib_file = os.path.join(tf_lib_dir, tf_fmwk_lib_name)
        print("SYSCFG LIB: ", tf_lib_file)
        dst_dir = os.path.join(artifacts_location, "tensorflow")
        if not os.path.isdir(dst_dir):
            os.mkdir(dst_dir)
        dst = os.path.join(dst_dir, tf_fmwk_lib_name)
        shutil.copyfile(tf_lib_file, dst)

    # Build OpenVINO if required.
    if arguments.build_openvino_backend:
        if not arguments.use_prebuilt_openvino:
            openvino_version = "releases/2020/4"
            openvino_src_dir = "./openvino"
            download_repo("openvino",
                          "https://github.com/openvinotoolkit/openvino",
                          openvino_version,
                          submodule_update=True)

            # Now build OpenVINO
            openvino_cmake_flags = [
                "-DENABLE_TESTS=OFF",
                "-DENABLE_FUNCTIONAL_TESTS=OFF",
                "-DENABLE_VPU=OFF",  # TODO: Fix OpenVINO VPU build
                "-DENABLE_CPPLINT=OFF",
                "-DENABLE_SPEECH_DEMO=FALSE",
                "-DCMAKE_INSTALL_RPATH=\"$ORIGIN\"",
                "-DCMAKE_INSTALL_PREFIX=" +
                os.path.join(artifacts_location, "openvino")
            ]

            if arguments.debug_build:
                openvino_cmake_flags.extend(["-DCMAKE_BUILD_TYPE=Debug"])

            cmake_build(build_dir, openvino_src_dir, openvino_cmake_flags,
                        verbosity)
    else:
        # Not building the OpenVINO backend, so build nGraph here
        # (unless a prebuilt nGraph was supplied).
        if not arguments.use_prebuilt_ngraph:
            ngraph_src_dir = './ngraph'
            if arguments.ngraph_src_dir:
                ngraph_src_dir = arguments.ngraph_src_dir
                print("Using local nGraph source in directory ",
                      ngraph_src_dir)
            else:
                if arguments.ngraph_version:
                    ngraph_version = arguments.ngraph_version

                print("nGraph Version: ", ngraph_version)
                download_repo("ngraph",
                              "https://github.com/NervanaSystems/ngraph.git",
                              ngraph_version)

            # Now build nGraph
            ngraph_cmake_flags = [
                "-DNGRAPH_INSTALL_PREFIX=" + artifacts_location,
                "-DNGRAPH_USE_CXX_ABI=" + cxx_abi, "-DNGRAPH_DEX_ONLY=TRUE",
                "-DNGRAPH_DEBUG_ENABLE=NO", "-DNGRAPH_UNIT_TEST_ENABLE=NO",
                "-DNGRAPH_TARGET_ARCH=" + target_arch,
                "-DNGRAPH_TUNE_ARCH=" + target_arch,
                "-DNGRAPH_TBB_ENABLE=FALSE"
            ]

            if arguments.use_ngraph_staticlibs:
                ngraph_cmake_flags.extend(["-DNGRAPH_STATIC_LIB_ENABLE=TRUE"])
                ngraph_cmake_flags.extend(
                    ["-DNGRAPH_CPU_STATIC_LIB_ENABLE=TRUE"])
                ngraph_cmake_flags.extend(
                    ["-DNGRAPH_INTERPRETER_STATIC_LIB_ENABLE=TRUE"])
                ngraph_cmake_flags.extend(
                    ["-DNGRAPH_DYNAMIC_COMPONENTS_ENABLE=OFF"])

            if arguments.debug_build:
                ngraph_cmake_flags.extend(["-DCMAKE_BUILD_TYPE=Debug"])

            ngraph_cmake_flags.extend([
                "-DNGRAPH_TOOLS_ENABLE=" +
                flag_string_map[platform.system() != 'Darwin']
            ])

            cmake_build(build_dir, ngraph_src_dir, ngraph_cmake_flags,
                        verbosity)

    # Next build CMAKE options for the bridge
    ngraph_tf_cmake_flags = [
        "-DNGRAPH_TF_INSTALL_PREFIX=" + artifacts_location,
        "-DUSE_PRE_BUILT_NGRAPH=ON",
        "-DNGRAPH_TARGET_ARCH=" + target_arch,
        "-DNGRAPH_TUNE_ARCH=" + target_arch,
    ]

    if arguments.build_openvino_backend:
        openvino_artifacts_dir = ""
        if not arguments.use_prebuilt_openvino:
            openvino_artifacts_dir = os.path.join(artifacts_location,
                                                  "openvino")
        else:
            openvino_artifacts_dir = os.path.abspath(
                arguments.use_prebuilt_openvino)
            ngraph_tf_cmake_flags.extend(["-DUSE_PREBUILT_OPENVINO=TRUE"])

        ngraph_tf_cmake_flags.extend(["-DENABLE_OPENVINO=ON"])
        ngraph_tf_cmake_flags.extend(
            ["-DOPENVINO_ARTIFACTS_DIR=" + openvino_artifacts_dir])
        ngraph_tf_cmake_flags.extend(
            ["-DNGRAPH_ARTIFACTS_DIR=" + openvino_artifacts_dir])
    else:
        if not arguments.use_prebuilt_ngraph:
            ngraph_tf_cmake_flags.extend(
                ["-DNGRAPH_ARTIFACTS_DIR=" + artifacts_location])
        else:
            ngraph_tf_cmake_flags.extend([
                "-DNGRAPH_ARTIFACTS_DIR=" +
                os.path.abspath(arguments.use_prebuilt_ngraph)
            ])

    if (arguments.use_ngraph_staticlibs):
        ngraph_tf_cmake_flags.extend(
            ["-DNGRAPH_BRIDGE_STATIC_LIB_ENABLE=TRUE"])

    if (arguments.debug_build):
        ngraph_tf_cmake_flags.extend(["-DCMAKE_BUILD_TYPE=Debug"])

    if arguments.use_tensorflow_from_location:
        ngraph_tf_cmake_flags.extend([
            "-DTF_SRC_DIR=" +
            os.path.abspath(arguments.use_tensorflow_from_location +
                            '/tensorflow')
        ])
    else:
        if not arguments.disable_cpp_api:
            print("TF_SRC_DIR: ", tf_src_dir)
            ngraph_tf_cmake_flags.extend(["-DTF_SRC_DIR=" + tf_src_dir])

    ngraph_tf_cmake_flags.extend(["-DUNIT_TEST_ENABLE=ON"])
    if not arguments.disable_cpp_api:
        ngraph_tf_cmake_flags.extend([
            "-DUNIT_TEST_TF_CC_DIR=" +
            os.path.join(artifacts_location, "tensorflow")
        ])

    ngraph_tf_cmake_flags.extend([
        "-DNGRAPH_TF_USE_GRAPPLER_OPTIMIZER=" +
        flag_string_map[arguments.use_grappler_optimizer]
    ])

    # Now build the bridge
    ng_tf_whl = build_ngraph_tf(build_dir, artifacts_location,
                                ngraph_tf_src_dir, venv_dir,
                                ngraph_tf_cmake_flags, verbosity)

    # Make sure that the ngraph bridge whl is present in the artifacts directory
    if not os.path.isfile(os.path.join(artifacts_location, ng_tf_whl)):
        raise Exception("Cannot locate nGraph whl in the artifacts location")

    print("SUCCESSFULLY generated wheel: %s" % ng_tf_whl)
    print("PWD: " + os.getcwd())

    # Copy the TensorFlow Python code tree to the artifacts directory so that
    # it can be used for running TensorFlow Python unit tests.
    #
    # There are four cases:
    # 1. use_tensorflow_from_location is not defined
    #    2. and use_prebuilt_tensorflow is defined: the entire TensorFlow
    #       source was already copied to the artifacts, so all we have to do
    #       is create a symbolic link
    #    3. or use_prebuilt_tensorflow is not defined: copy the source tree
    #       from the local build directory
    # 4. use_tensorflow_from_location is defined: copy the source tree from
    #    that location
    if arguments.use_tensorflow_from_location == '':
        # Case 1
        if arguments.use_prebuilt_tensorflow != '':
            # Case 2
            base_dir = None
        else:
            # Case 3
            base_dir = build_dir_abs
    else:
        # Case 4
        base_dir = arguments.use_tensorflow_from_location

    if base_dir is not None:
        dest_dir = os.path.join(artifacts_location, "tensorflow")
        command_executor(
            ['cp', '-r', base_dir + '/tensorflow/tensorflow/python', dest_dir],
            verbose=True)
    else:
        # Create a symlink to the TensorFlow Python tree inside artifacts
        link_src = os.path.join(artifacts_location,
                                "tensorflow/tensorflow/python")
        link_dst = os.path.join(artifacts_location, "tensorflow/python")
        command_executor(['ln', '-sf', link_src, link_dst], verbose=True)

    # Run a quick test
    install_ngraph_tf(tf_version, venv_dir,
                      os.path.join(artifacts_location, ng_tf_whl))

    if arguments.use_grappler_optimizer:
        import tensorflow as tf
        import ngraph_bridge
        if not ngraph_bridge.is_grappler_enabled():
            raise Exception(
                "Build failed: 'use_grappler_optimizer' specified but not used"
            )

    print('\033[1;32mBuild successful\033[0m')
    os.chdir(pwd)
Example No. 22
0
def run_resnet50_from_artifacts(ngraph_tf_src_dir, artifact_dir, batch_size,
                                iterations):
    root_pwd = os.getcwd()
    artifact_dir = os.path.abspath(artifact_dir)
    ngraph_tf_src_dir = os.path.abspath(ngraph_tf_src_dir)
    install_ngraph_bridge(artifact_dir)

    # Now clone the repo and proceed
    call(['git', 'clone', 'https://github.com/tensorflow/benchmarks.git'])
    os.chdir('benchmarks')
    call(['git', 'checkout', 'aef6daa90a467a1fc7ce8395cd0067e5fda1ecff'])

    # Check to see if we need to patch the repo for Grappler
    # benchmark_cnn.patch will only work for the CPU backend
    patch_file = os.path.abspath(
        os.path.join(ngraph_tf_src_dir, "test/grappler/benchmark_cnn.patch"))
    import ngraph_bridge
    if ngraph_bridge.is_grappler_enabled():
        print("Patching repo using: %s" % patch_file)
        apply_patch(patch_file)

    os.chdir('scripts/tf_cnn_benchmarks/')

    # junit_script = os.path.abspath('%s/test/ci/junit-wrap.sh' % root_pwd)

    # Update the script by appending `import ngraph_bridge` so the bridge
    # library is loaded when the benchmark runs
    with open('convnet_builder.py', 'a') as outfile:
        outfile.write('import ngraph_bridge\n')

    # Setup the env flags
    import psutil
    num_cores = int(psutil.cpu_count(logical=False))
    print("OMP_NUM_THREADS: %s " % str(num_cores))

    os.environ['OMP_NUM_THREADS'] = str(num_cores)
    os.environ["KMP_AFFINITY"] = 'granularity=fine,compact,1,0'

    # Delete the temporary model save directory
    model_save_dir = os.getcwd() + '/modelsavepath'
    if os.path.exists(model_save_dir) and os.path.isdir(model_save_dir):
        shutil.rmtree(model_save_dir)

    eval_eventlog_dir = os.getcwd() + '/eval_eventlog_dir'
    if os.path.exists(eval_eventlog_dir) and os.path.isdir(eval_eventlog_dir):
        shutil.rmtree(eval_eventlog_dir)

    cmd = [
        'python', 'tf_cnn_benchmarks.py', '--data_format', 'NHWC',
        '--num_inter_threads', '1', '--train_dir=' + model_save_dir,
        '--num_batches',
        str(iterations), '--model=resnet50', '--batch_size=' + str(batch_size),
        '--eval_dir=' + eval_eventlog_dir
    ]
    command_executor(cmd, verbose=True)
    cmd = [
        'python', 'tf_cnn_benchmarks.py', '--data_format', 'NHWC',
        '--num_inter_threads', '1', '--train_dir=' + model_save_dir,
        '--model=resnet50', '--batch_size=' + str(batch_size), '--num_batches',
        str(iterations), '--eval', '--eval_dir=' + eval_eventlog_dir
    ]
    # Note: the eval step has been known to fail with TF 2.0
    command_executor(cmd, verbose=True)

    os.chdir(root_pwd)
Example No. 23
0
def main():
    '''
    Builds TensorFlow, OpenVINO, and ngraph-tf for python 3
    '''

    # Component versions
    tf_version = "v2.2.0"
    use_intel_tf = False
    openvino_version = "releases/2021/2"

    # Command line parser options
    parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)

    parser.add_argument('--debug_build',
                        help="Builds a debug version of the components\n",
                        action="store_true")

    parser.add_argument('--verbose_build',
                        help="Display verbose error messages\n",
                        action="store_true")

    parser.add_argument(
        '--target_arch',
        help=
        "Architecture flag to use (e.g., haswell, core-avx2, etc.). Default \'native\'\n",
    )

    parser.add_argument(
        '--use_prebuilt_tensorflow',
        type=str,
        help=
        "Skip building TensorFlow and use the specified prebuilt version.\n" +
        "If prebuilt version isn't specified, TF version " + tf_version +
        " will be used.\n" +
        "Note: in this case C++ API, unit tests and examples won't be build for nGraph-TF bridge",
        const=tf_version,
        default='',
        nargs='?',
        action="store")

    parser.add_argument(
        '--use_intel_tensorflow',
        help="Use Intel TensorFlow for either building from source or in \n" +
        "conjunction with --use_prebuilt_tensorflow or --use_tensorflow_from_location.",
        default='',
        action="store_true")

    parser.add_argument(
        '--use_grappler_optimizer',
        help="Use Grappler optimizer instead of the optimization passes\n",
        action="store_true")

    parser.add_argument('--artifacts_dir',
                        type=str,
                        help="Copy the artifacts to the given directory\n",
                        action="store")

    parser.add_argument(
        '--use_tensorflow_from_location',
        help=
        "Use TensorFlow from a directory where it was already built and stored.\n"
        "NOTE: This location is expected to be populated by build_tf.py\n",
        action="store",
        default='')

    parser.add_argument(
        '--use_openvino_from_location',
        help=
        "Use OpenVINO from a directory where it was already built and stored.\n"
        "NOTE: This location is expected to be populated by build_ov.py\n",
        action="store",
        default='')

    parser.add_argument('--disable_cpp_api',
                        help="Disables C++ API, unit tests and examples\n",
                        action="store_true")

    # Done with the options. Now parse the commandline
    arguments = parser.parse_args()

    if (arguments.debug_build):
        print("Building in debug mode\n")

    verbosity = False
    if (arguments.verbose_build):
        print("Building with verbose output messages\n")
        verbosity = True

    #-------------------------------
    # Recipe
    #-------------------------------

    # Default directories
    build_dir = 'build_cmake'

    assert not (
        arguments.use_tensorflow_from_location != ''
        and arguments.use_prebuilt_tensorflow != ''
    ), ("\"use_tensorflow_from_location\" and \"use_prebuilt_tensorflow\" "
        "cannot be used together.")

    version_check((arguments.use_prebuilt_tensorflow != ''),
                  (arguments.use_tensorflow_from_location != ''),
                  arguments.disable_cpp_api)

    if arguments.use_tensorflow_from_location != '':
        # Check if the prebuilt folder has necessary files
        assert os.path.isdir(
            arguments.use_tensorflow_from_location
        ), "Prebuilt TF path " + arguments.use_tensorflow_from_location + " does not exist"
        loc = arguments.use_tensorflow_from_location + '/artifacts/tensorflow'
        assert os.path.isdir(
            loc), "Could not find artifacts/tensorflow directory"
        found_whl = False
        found_libtf_fw = False
        found_libtf_cc = False
        for i in os.listdir(loc):
            if '.whl' in i:
                found_whl = True
            if 'libtensorflow_cc' in i:
                found_libtf_cc = True
            if 'libtensorflow_framework' in i:
                found_libtf_fw = True
        assert found_whl, "Did not find TF whl file"
        assert found_libtf_fw, "Did not find libtensorflow_framework"
        assert found_libtf_cc, "Did not find libtensorflow_cc"

    try:
        os.makedirs(build_dir)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(build_dir):
            pass
        else:
            raise

    pwd = os.getcwd()
    ngraph_tf_src_dir = os.path.abspath(pwd)
    print("NGTF SRC DIR: " + ngraph_tf_src_dir)
    build_dir_abs = os.path.abspath(build_dir)
    os.chdir(build_dir)

    venv_dir = 'venv-tf-py3'
    artifacts_location = 'artifacts'
    if arguments.artifacts_dir:
        artifacts_location = os.path.abspath(arguments.artifacts_dir)

    artifacts_location = os.path.abspath(artifacts_location)
    print("ARTIFACTS location: " + artifacts_location)

    # Create the artifacts directory if it doesn't exist
    if not os.path.isdir(artifacts_location):
        os.mkdir(artifacts_location)

    # Install virtualenv
    install_virtual_env(venv_dir)

    # Load the virtual env
    load_venv(venv_dir)

    # Setup the virtual env
    setup_venv(venv_dir)

    target_arch = 'native'
    if (arguments.target_arch):
        target_arch = arguments.target_arch

    print("Target Arch: %s" % target_arch)

    if arguments.use_prebuilt_tensorflow != '':
        tf_version = arguments.use_prebuilt_tensorflow

    if arguments.use_intel_tensorflow:
        use_intel_tf = True
        print("Using Intel Tensorflow")

    # The cxx_abi flag is translated to _GLIBCXX_USE_CXX11_ABI
    # For gcc older than 5.3, this flag is set to 0 and for newer ones,
    # this is set to 1
    # The specific value is determined from the TensorFlow build
    # Normally the shipped TensorFlow going forward is built with gcc 7.3
    # and thus this flag is set to 1
    cxx_abi = "1"

    if arguments.use_tensorflow_from_location != "":
        # Some asserts to make sure the directory structure of
        # use_tensorflow_from_location is correct. The location
        # should have: ./artifacts/tensorflow, which is expected
        # to contain one TF whl file, framework.so and cc.so
        print("Using TensorFlow from " +
              arguments.use_tensorflow_from_location)
        # The tf whl should be in use_tensorflow_from_location/artifacts/tensorflow
        tf_whl_loc = os.path.abspath(arguments.use_tensorflow_from_location +
                                     '/artifacts/tensorflow')
        possible_whl = [i for i in os.listdir(tf_whl_loc) if '.whl' in i]
        # Make sure there is exactly one TF whl
        assert len(possible_whl) == 1, \
            "Expected one TF whl file, but found " + str(len(possible_whl))
        tf_whl = os.path.abspath(tf_whl_loc + '/' + possible_whl[0])
        assert os.path.isfile(tf_whl), "Did not find " + tf_whl
        # Install the found TF whl file
        command_executor(["pip", "install", "-U", tf_whl])
        cxx_abi = get_tf_cxxabi()
        cwd = os.getcwd()
        os.chdir(tf_whl_loc)
        tf_in_artifacts = os.path.join(os.path.abspath(artifacts_location),
                                       "tensorflow")
        if not os.path.isdir(tf_in_artifacts):
            os.mkdir(tf_in_artifacts)
        # This function copies the .so files from
        # use_tensorflow_from_location/artifacts/tensorflow to
        # artifacts/tensorflow
        copy_tf_to_artifacts(tf_version, tf_in_artifacts, tf_whl_loc,
                             use_intel_tf)
        os.chdir(cwd)
    else:
        if arguments.use_prebuilt_tensorflow != '':
            print("Using TensorFlow version", tf_version)
            if use_intel_tf:
                print("Install Intel Tensorflow")
                command_executor([
                    "pip", "install", "-U", "intel-tensorflow==" + tf_version
                ])
            else:
                print("Install native TensorFlow")
                command_executor(
                    ["pip", "install", "-U", "tensorflow==" + tf_version])
            cxx_abi = get_tf_cxxabi()

            tf_src_dir = os.path.join(artifacts_location, "tensorflow")
            print("TF_SRC_DIR: ", tf_src_dir)
            # Download TF source for enabling TF python tests
            pwd_now = os.getcwd()
            os.chdir(artifacts_location)
            print("DOWNLOADING TF: PWD", os.getcwd())
            download_repo("tensorflow",
                          "https://github.com/tensorflow/tensorflow.git",
                          tf_version)
            os.chdir(pwd_now)
            # Finally, copy the libtensorflow_framework.so to the artifacts
            tf_fmwk_lib_name = 'libtensorflow_framework.so.2'
            if (platform.system() == 'Darwin'):
                tf_fmwk_lib_name = 'libtensorflow_framework.2.dylib'
            import tensorflow as tf
            tf_lib_dir = tf.sysconfig.get_lib()
            tf_lib_file = os.path.join(tf_lib_dir, tf_fmwk_lib_name)
            print("SYSCFG LIB: ", tf_lib_file)
            dst_dir = os.path.join(artifacts_location, "tensorflow")
            if not os.path.isdir(dst_dir):
                os.mkdir(dst_dir)
            dst = os.path.join(dst_dir, tf_fmwk_lib_name)
            shutil.copyfile(tf_lib_file, dst)
        else:
            print("Building TensorFlow from source")
            # Download TensorFlow
            download_repo("tensorflow",
                          "https://github.com/tensorflow/tensorflow.git",
                          tf_version)
            tf_src_dir = os.path.join(os.getcwd(), "tensorflow")
            print("TF_SRC_DIR: ", tf_src_dir)

            # Build TensorFlow
            build_tensorflow(tf_version, "tensorflow", artifacts_location,
                             target_arch, verbosity, use_intel_tf)

            # Now build the libtensorflow_cc.so - the C++ library
            build_tensorflow_cc(tf_version, tf_src_dir, artifacts_location,
                                target_arch, verbosity, use_intel_tf)

            # Install tensorflow to our own virtual env
            # Note that if gcc 7.3 is used for building TensorFlow this flag
            # will be 1
            cxx_abi = install_tensorflow(venv_dir, artifacts_location)

            # Copy the freshly built TensorFlow .so files into
            # artifacts/tensorflow
            cwd = os.getcwd()
            os.chdir(tf_src_dir)
            dst_dir = os.path.join(artifacts_location, "tensorflow")
            copy_tf_to_artifacts(tf_version, dst_dir, None, use_intel_tf)
            os.chdir(cwd)

    if arguments.use_openvino_from_location != "":
        print("Using OpenVINO from " + arguments.use_openvino_from_location)
    else:
        print("Building OpenVINO from source")
        print(
            "NOTE: OpenVINO python module is not built when building from source."
        )

        # Download OpenVINO
        download_repo("openvino",
                      "https://github.com/openvinotoolkit/openvino",
                      openvino_version,
                      submodule_update=True)
        openvino_src_dir = os.path.join(os.getcwd(), "openvino")
        print("OV_SRC_DIR: ", openvino_src_dir)

        build_openvino(build_dir, openvino_src_dir, cxx_abi, target_arch,
                       artifacts_location, arguments.debug_build, verbosity)

    # Next build CMAKE options for the bridge
    ngraph_tf_cmake_flags = [
        "-DNGRAPH_TF_INSTALL_PREFIX=" + artifacts_location,
        "-DCMAKE_CXX_FLAGS=-march=" + target_arch,
    ]

    openvino_artifacts_dir = ""
    if arguments.use_openvino_from_location == '':
        openvino_artifacts_dir = os.path.join(artifacts_location, "openvino")
    else:
        openvino_artifacts_dir = os.path.abspath(
            arguments.use_openvino_from_location)
        ngraph_tf_cmake_flags.extend(["-DUSE_OPENVINO_FROM_LOCATION=TRUE"])

    ngraph_tf_cmake_flags.extend(
        ["-DOPENVINO_ARTIFACTS_DIR=" + openvino_artifacts_dir])

    if (arguments.debug_build):
        ngraph_tf_cmake_flags.extend(["-DCMAKE_BUILD_TYPE=Debug"])

    if arguments.use_tensorflow_from_location:
        ngraph_tf_cmake_flags.extend([
            "-DTF_SRC_DIR=" +
            os.path.abspath(arguments.use_tensorflow_from_location +
                            '/tensorflow')
        ])
    else:
        if not arguments.disable_cpp_api and not arguments.use_prebuilt_tensorflow:
            print("TF_SRC_DIR: ", tf_src_dir)
            ngraph_tf_cmake_flags.extend(["-DTF_SRC_DIR=" + tf_src_dir])

    ngraph_tf_cmake_flags.extend(["-DUNIT_TEST_ENABLE=ON"])
    if not arguments.disable_cpp_api and not arguments.use_prebuilt_tensorflow:
        ngraph_tf_cmake_flags.extend([
            "-DUNIT_TEST_TF_CC_DIR=" +
            os.path.join(artifacts_location, "tensorflow")
        ])

    ngraph_tf_cmake_flags.extend([
        "-DNGRAPH_TF_USE_GRAPPLER_OPTIMIZER=" +
        flag_string_map[arguments.use_grappler_optimizer]
    ])

    # Now build the bridge
    ng_tf_whl = build_ngraph_tf(build_dir, artifacts_location,
                                ngraph_tf_src_dir, venv_dir,
                                ngraph_tf_cmake_flags, verbosity)

    # Make sure that the ngraph bridge whl is present in the artifacts directory
    if not os.path.isfile(os.path.join(artifacts_location, ng_tf_whl)):
        raise Exception("Cannot locate nGraph whl in the artifacts location")

    print("SUCCESSFULLY generated wheel: %s" % ng_tf_whl)
    print("PWD: " + os.getcwd())

    # Copy the TensorFlow Python code tree to the artifacts directory so that
    # it can be used for running TensorFlow Python unit tests.
    #
    # There are four cases:
    # 1. use_tensorflow_from_location is not defined
    #    2. and use_prebuilt_tensorflow is defined: the entire TensorFlow
    #       source was already copied to the artifacts, so all we have to do
    #       is create a symbolic link
    #    3. or use_prebuilt_tensorflow is not defined: copy the source tree
    #       from the local build directory
    # 4. use_tensorflow_from_location is defined: copy the source tree from
    #    that location
    if arguments.use_tensorflow_from_location == '':
        # Case 1
        if arguments.use_prebuilt_tensorflow != '':
            # Case 2
            base_dir = None
        else:
            # Case 3
            base_dir = build_dir_abs
    else:
        # Case 4
        base_dir = arguments.use_tensorflow_from_location

    if base_dir is not None:
        dest_dir = os.path.join(artifacts_location, "tensorflow")
        command_executor(
            ['cp', '-r', base_dir + '/tensorflow/tensorflow/python', dest_dir],
            verbose=True)
    else:
        # Create a symlink to the TensorFlow Python tree inside artifacts
        link_src = os.path.join(artifacts_location,
                                "tensorflow/tensorflow/python")
        link_dst = os.path.join(artifacts_location, "tensorflow/python")
        command_executor(['ln', '-sf', link_src, link_dst], verbose=True)

    # Run a quick test
    install_ngraph_tf(tf_version, venv_dir,
                      os.path.join(artifacts_location, ng_tf_whl))

    if arguments.use_grappler_optimizer:
        import tensorflow as tf
        import ngraph_bridge
        if not ngraph_bridge.is_grappler_enabled():
            raise Exception(
                "Build failed: 'use_grappler_optimizer' specified but not used"
            )

    print('\033[1;32mBuild successful\033[0m')
    os.chdir(pwd)
Example No. 24
0
def main():
    '''
    Builds TensorFlow, ngraph, and ngraph-tf for python 3
    '''

    # Component versions
    ngraph_version = "v0.19.0"
    tf_version = "v1.13.1"

    # Command line parser options
    parser = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)

    parser.add_argument(
        '--debug_build',
        help="Builds a debug version of the nGraph components\n",
        action="store_true")

    parser.add_argument('--verbose_build',
                        help="Display verbose error messages\n",
                        action="store_true")

    parser.add_argument(
        '--target_arch',
        help=
        "Architecture flag to use (e.g., haswell, core-avx2, etc.). Default \'native\'\n",
    )

    parser.add_argument(
        '--build_gpu_backend',
        help=
        "nGraph backends will include nVidia GPU. Use: NGRAPH_TF_BACKEND=GPU\n"
        "Note: You need to have CUDA headers and libraries available on the build system.\n",
        action="store_true")

    parser.add_argument(
        '--build_plaidml_backend',
        help=
        "nGraph backends will include PlaidML bckend. Use: NGRAPH_TF_BACKEND=PLAIDML\n",
        action="store_true")

    parser.add_argument(
        '--build_intelgpu_backend',
        help=
        "nGraph backends will include Intel GPU bckend. Use: NGRAPH_TF_BACKEND=INTELGPU\n",
        action="store_true")

    parser.add_argument(
        '--use_prebuilt_tensorflow',
        help="Skip building TensorFlow and use downloaded version.\n" +
        "Note that in this case C++ unit tests won't be build for nGraph-TF bridge",
        action="store_true")

    parser.add_argument(
        '--distributed_build',
        type=str,
        help="Builds a distributed version of the nGraph components\n",
        action="store")

    parser.add_argument(
        '--enable_variables_and_optimizers',
        help=
        "Ops like variable and optimizers are supported by nGraph in this version of the bridge\n",
        action="store_true")

    parser.add_argument(
        '--use_grappler_optimizer',
        help="Use Grappler optimizer instead of the optimization passes\n",
        action="store_true")

    parser.add_argument('--artifacts_dir',
                        type=str,
                        help="Copy the artifacts to the given directory\n",
                        action="store")

    parser.add_argument(
        '--ngraph_src_dir',
        type=str,
        help=
        "Local nGraph source directory to use. Overrides --ngraph_version.\n",
        action="store")

    parser.add_argument(
        '--ngraph_version',
        type=str,
        help="nGraph version to use. Overridden by --ngraph_src_dir. (Default: "
        + ngraph_version + ")\n",
        action="store")

    parser.add_argument('--skip_tensorflow_build',
                        help="Use TensorFlow that's already installed" +
                        "(do not build or install) \n",
                        action="store_true")

    # Done with the options. Now parse the commandline
    arguments = parser.parse_args()

    if (arguments.debug_build):
        print("Building in DEBUG mode\n")

    verbosity = False
    if (arguments.verbose_build):
        print("Building in with VERBOSE output messages\n")
        verbosity = True

    #-------------------------------
    # Recipe
    #-------------------------------

    # Default directories
    build_dir = 'build_cmake'

    try:
        os.makedirs(build_dir)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(build_dir):
            pass
        else:
            raise

    pwd = os.getcwd()
    ngraph_tf_src_dir = os.path.abspath(pwd)
    build_dir_abs = os.path.abspath(build_dir)
    os.chdir(build_dir)

    venv_dir = 'venv-tf-py3'
    artifacts_location = 'artifacts'
    if arguments.artifacts_dir:
        artifacts_location = os.path.abspath(arguments.artifacts_dir)

    artifacts_location = os.path.abspath(artifacts_location)
    print("ARTIFACTS location: " + artifacts_location)

    # Install virtualenv
    install_virtual_env(venv_dir)

    # Load the virtual env
    load_venv(venv_dir)

    # Setup the virtual env
    setup_venv(venv_dir)

    target_arch = 'native'
    if (arguments.target_arch):
        target_arch = arguments.target_arch

    print("Target Arch: %s" % target_arch)

    # The cxx_abi flag is translated to _GLIBCXX_USE_CXX11_ABI
    # For gcc 4.8 this flag is set to 0; for newer versions it is set to 1
    # The specific value is determined from the TensorFlow build
    # Normally the shipped TensorFlow is built with gcc 4.8 and thus this
    # flag is set to 0
    cxx_abi = "0"

    if arguments.use_prebuilt_tensorflow:
        print("Using existing TensorFlow")
        command_executor(["pip", "install", "-U", "tensorflow==" + tf_version])

        import tensorflow as tf
        print('Version information:')
        print('TensorFlow version: ', tf.__version__)
        print('C Compiler version used in building TensorFlow: ',
              tf.__compiler_version__)
        cxx_abi = str(tf.__cxx11_abi_flag__)
    else:
        if not arguments.skip_tensorflow_build:
            print("Building TensorFlow")
            # Download TensorFlow
            download_repo("tensorflow",
                          "https://github.com/tensorflow/tensorflow.git",
                          tf_version)

            # Build TensorFlow
            build_tensorflow(venv_dir, "tensorflow", artifacts_location,
                             target_arch, verbosity)

            # Install tensorflow
            # Note that if gcc 4.8 is used for building TensorFlow this flag
            # will be 0
            cxx_abi = install_tensorflow(venv_dir, artifacts_location)
        else:
            import tensorflow as tf
            print('Version information:')
            print('TensorFlow version: ', tf.__version__)
            print('C Compiler version used in building TensorFlow: ',
                  tf.__compiler_version__)
            cxx_abi = str(tf.__cxx11_abi_flag__)

    # Download nGraph if required.
    ngraph_src_dir = './ngraph'
    if arguments.ngraph_src_dir:
        ngraph_src_dir = arguments.ngraph_src_dir

        print("Using local nGraph source in directory ", ngraph_src_dir)
    else:
        if arguments.ngraph_version:
            ngraph_version = arguments.ngraph_version

        print("nGraph Version: ", ngraph_version)
        download_repo("ngraph", "https://github.com/NervanaSystems/ngraph.git",
                      ngraph_version)

    # Now build nGraph
    ngraph_cmake_flags = [
        "-DNGRAPH_INSTALL_PREFIX=" + artifacts_location,
        "-DNGRAPH_USE_CXX_ABI=" + cxx_abi,
        "-DNGRAPH_DEX_ONLY=TRUE",
        "-DNGRAPH_DEBUG_ENABLE=NO",
        "-DNGRAPH_TARGET_ARCH=" + target_arch,
        "-DNGRAPH_TUNE_ARCH=" + target_arch,
    ]

    if arguments.debug_build:
        ngraph_cmake_flags.extend(["-DCMAKE_BUILD_TYPE=Debug"])

    if (arguments.distributed_build == "OMPI"):
        ngraph_cmake_flags.extend(["-DNGRAPH_DISTRIBUTED_ENABLE=OMPI"])
    elif (arguments.distributed_build == "MLSL"):
        ngraph_cmake_flags.extend(["-DNGRAPH_DISTRIBUTED_ENABLE=MLSL"])
    else:
        ngraph_cmake_flags.extend(["-DNGRAPH_DISTRIBUTED_ENABLE=OFF"])

    if arguments.build_plaidml_backend:
        command_executor(["pip", "install", "-U", "plaidML"])

    flag_string_map = {True: 'YES', False: 'NO'}
    ngraph_cmake_flags.extend([
        "-DNGRAPH_TOOLS_ENABLE=" +
        flag_string_map[platform.system() != 'Darwin']
    ])
    ngraph_cmake_flags.extend([
        "-DNGRAPH_GPU_ENABLE=" + flag_string_map[arguments.build_gpu_backend]
    ])
    ngraph_cmake_flags.extend([
        "-DNGRAPH_PLAIDML_ENABLE=" +
        flag_string_map[arguments.build_plaidml_backend]
    ])
    ngraph_cmake_flags.extend([
        "-DNGRAPH_INTELGPU_ENABLE=" +
        flag_string_map[arguments.build_intelgpu_backend]
    ])
    ngraph_cmake_flags.extend([
        "-DNGRAPH_UNIT_TEST_ENABLE=" +
        flag_string_map[not arguments.use_prebuilt_tensorflow]
    ])

    build_ngraph(build_dir, ngraph_src_dir, ngraph_cmake_flags, verbosity)

    # Next build CMAKE options for the bridge
    tf_src_dir = os.path.abspath("tensorflow")

    ngraph_tf_cmake_flags = [
        "-DNGRAPH_TF_INSTALL_PREFIX=" + artifacts_location,
        "-DUSE_PRE_BUILT_NGRAPH=ON",
        "-DNGRAPH_TARGET_ARCH=" + target_arch,
        "-DNGRAPH_TUNE_ARCH=" + target_arch,
        "-DNGRAPH_ARTIFACTS_DIR=" + artifacts_location,
    ]
    if (arguments.debug_build):
        ngraph_tf_cmake_flags.extend(["-DCMAKE_BUILD_TYPE=Debug"])

    if not arguments.use_prebuilt_tensorflow:
        ngraph_tf_cmake_flags.extend(["-DTF_SRC_DIR=" + tf_src_dir])
        ngraph_tf_cmake_flags.extend([
            "-DUNIT_TEST_TF_CC_DIR=" +
            os.path.join(artifacts_location, "tensorflow")
        ])

    if ((arguments.distributed_build == "OMPI")
            or (arguments.distributed_build == "MLSL")):
        ngraph_tf_cmake_flags.extend(["-DNGRAPH_DISTRIBUTED_ENABLE=TRUE"])
    else:
        ngraph_tf_cmake_flags.extend(["-DNGRAPH_DISTRIBUTED_ENABLE=FALSE"])

    ngraph_tf_cmake_flags.extend([
        "-DUNIT_TEST_ENABLE=" +
        flag_string_map[not arguments.use_prebuilt_tensorflow]
    ])
    ngraph_tf_cmake_flags.extend([
        "-DNGRAPH_TF_ENABLE_VARIABLES_AND_OPTIMIZERS=" +
        flag_string_map[arguments.enable_variables_and_optimizers]
    ])
    ngraph_tf_cmake_flags.extend([
        "-DNGRAPH_TF_USE_GRAPPLER_OPTIMIZER=" +
        flag_string_map[arguments.use_grappler_optimizer]
    ])

    # Now build the bridge
    ng_tf_whl = build_ngraph_tf(build_dir, artifacts_location,
                                ngraph_tf_src_dir, venv_dir,
                                ngraph_tf_cmake_flags, verbosity)

    # Make sure that the ngraph bridge whl is present in the artfacts directory
    if not os.path.isfile(os.path.join(artifacts_location, ng_tf_whl)):
        raise Exception("Cannot locate nGraph whl in the artifacts location")

    print("SUCCESSFULLY generated wheel: %s" % ng_tf_whl)
    print("PWD: " + os.getcwd())

    # Copy the TensorFlow Python code tree to artifacts directory so that they can
    # be used for running TensorFlow Python unit tests
    if not arguments.use_prebuilt_tensorflow:
        command_executor([
            'cp', '-r', build_dir_abs + '/tensorflow/tensorflow/python',
            os.path.join(artifacts_location, "tensorflow")
        ])

    # Run a quick test
    install_ngraph_tf(venv_dir, os.path.join(artifacts_location, ng_tf_whl))

    if arguments.use_grappler_optimizer:
        import tensorflow as tf
        import ngraph_bridge
        if not ngraph_bridge.is_grappler_enabled():
            raise Exception(
                "Build failed: 'use_grappler_optimizer' specified but not used"
            )

    print('\033[1;32mBuild successful\033[0m')
    os.chdir(pwd)
Example No. 25
0
    def test_command_line_api(self, inp_format, inp_loc, out_format,
                              commandline, ng_device, shape_hints, precompile):
        # Only run this test when grappler is enabled
        if not ngraph_bridge.is_grappler_enabled():
            return
        assert Testtf2ngraph.format_and_loc_match(inp_format, inp_loc)
        out_loc = inp_loc.split('.')[0] + '_modified' + (
            '' if out_format == 'savedmodel' else '.' + out_format)
        # Remove any leftover output from a previous run (out_loc is a
        # directory for savedmodel outputs, a file otherwise)
        try:
            (shutil.rmtree, os.remove)[os.path.isfile(out_loc)](out_loc)
        except OSError:
            pass
        conversion_successful = False
        try:
            optional_backend_params = {
                'CPU': {
                    'device_config': '0'
                },
                'INTERPRETER': {
                    'test_echo': '1'
                }
            }[ng_device]
            config_file_name = 'temp_config_file.json'
            Tf2ngraphJson.dump_json(config_file_name, optional_backend_params,
                                    shape_hints)
            if commandline:
                # In CI this test is expected to be run out of artifacts/test/python
                command_executor('python ../../tools/tf2ngraph.py --input_' +
                                 inp_format + ' ' + inp_loc +
                                 ' --output_nodes out_node --output_' +
                                 out_format + ' ' + out_loc +
                                 ' --ng_backend ' + ng_device +
                                 ' --config_file ' + config_file_name +
                                 ("", " --precompile ")[precompile])
            else:
                convert(inp_format, inp_loc, out_format, out_loc, ['out_node'],
                        ng_device, optional_backend_params, shape_hints,
                        precompile)
            conversion_successful = True
        finally:
            if not conversion_successful:
                try:
                    (shutil.rmtree,
                     os.remove)[os.path.isfile(out_loc)](out_loc)
                    os.remove(config_file_name)
                except OSError:
                    pass
        assert conversion_successful

        gdef = get_gdef(out_format, out_loc)
        (shutil.rmtree, os.remove)[os.path.isfile(out_loc)](out_loc)
        os.remove(config_file_name)

        with tf.Graph().as_default() as g:
            tf.import_graph_def(gdef, name='')
            # The graph should have exactly one encapsulate
            assert len([
                0 for i in g.get_operations() if i.type == 'NGraphEncapsulate'
            ]) == 1
            # TODO: check that the encapsulate op has correct backend and extra params attached to it
            x = self.get_tensor(g, "x:0", False)
            y = self.get_tensor(g, "y:0", False)
            out = self.get_tensor(g, "out_node:0", False)

            sess_fn = lambda sess: sess.run(
                [out], feed_dict={i: np.zeros((10, ))
                                  for i in [x, y]})

            res1 = self.with_ngraph(sess_fn)
            res2 = self.without_ngraph(sess_fn)

            exp = [0.5 * np.ones((10, ))]
            # Note both run on Host (because NgraphEncapsulate can only run on host)
            assert np.isclose(res1, res2).all()
            # Comparing with expected value
            assert np.isclose(res1, exp).all()