Beispiel #1
0
def test_main_without_json(pd_obj=None, tf_obj=None, config=None):
    """Exercise a Paddle op and/or its TensorFlow twin as selected by CLI args.

    When the task is "accuracy" both frameworks run and their outputs are
    cross-checked; otherwise only the framework requested on the command
    line is executed.
    """
    if config is None:
        raise ValueError("API config must be set.")

    args = parse_args()
    config.backward = args.backward
    feed_spec = copy_feed_spec(config)
    feed_list = None

    need_paddle = args.task == "accuracy" or args.framework in ["paddle", "both"]
    need_tf = args.task == "accuracy" or args.framework in [
        "tensorflow", "tf", "both"
    ]

    if need_paddle:
        if pd_obj is None:
            raise ValueError("Paddle object is None.")
        print(config)
        pd_obj.name = config.name
        pd_obj.create_program()
        pd_obj.build_program(config=config)
        # The Paddle feed is generated first and later translated for TF,
        # so both frameworks consume identical input data.
        feed_list = feeder.feed_paddle(pd_obj, feed_spec=feed_spec)
        pd_outputs = run_paddle(args.task, pd_obj, args, feed_list)

    if need_tf:
        if tf_obj is None:
            raise ValueError("TensorFlow object is None.")
        tf_config = config.to_tensorflow()
        print(tf_config)
        tf_obj.name = tf_config.name
        tf_obj.build_graph(config=tf_config)
        feed_list = feeder.feed_tensorflow(
            tf_obj, feed_list, feed_spec=feed_spec)
        tf_outputs = run_tensorflow(args.task, tf_obj, args, feed_list)

    if args.task == "accuracy":
        utils.check_outputs(pd_outputs, tf_outputs, name=pd_obj.name)
Beispiel #2
0
def test_main_without_json(pd_obj=None, tf_obj=None, config=None):
    """Benchmark or accuracy-check a Paddle API against TensorFlow.

    Args:
        pd_obj: Paddle benchmark object; required when Paddle is enabled.
        tf_obj: TensorFlow benchmark object; required when TF is enabled.
        config: config object describing the op under test; must be set.

    Raises:
        AssertionError: if ``config`` or a required framework object is None.
        SystemExit: if the Paddle run reports failure.
    """
    assert config is not None, "API config must be set."

    args = parse_args()
    # Some configs are explicitly disabled; bail out early.
    if _check_disabled(config, args):
        return

    _adaptive_repeat(config, args)
    config.backward = args.backward
    # Only the accuracy task uses explicit feed/fetch handling.
    use_feed_fetch = args.task == "accuracy"

    feeder_adapter = None
    if _is_tensorflow_enabled(args, config):
        assert tf_obj is not None, "TensorFlow object is None."
        tf_config = config.to_tensorflow()
        print(tf_config)
        # Reused by the Paddle run below so both frameworks see the same data.
        feeder_adapter = tf_obj.generate_random_feeder(tf_config,
                                                       use_feed_fetch)
        tf_outputs, tf_stats = tf_obj.run(tf_config, args, use_feed_fetch,
                                          feeder_adapter)
        if args.task == "speed":
            tf_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(tf_stats,
                                         log_level=args.log_level,
                                         config_params=config.to_string())

    if _is_paddle_enabled(args, config):
        assert pd_obj is not None, "Paddle object is None."
        print(config)
        pd_outputs, pd_stats = pd_obj.run(config, args, use_feed_fetch,
                                          feeder_adapter)

        if args.task == "speed":
            pd_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(pd_stats,
                                         log_level=args.log_level,
                                         config_params=config.to_string())

        # pd_obj.run signals failure by returning False instead of outputs;
        # use an identity check so an ndarray output never hits `== False`.
        if pd_outputs is False:
            sys.exit(1)

    if args.task == "accuracy":
        if config.run_tf:
            if args.log_level == 1:
                for i, out in enumerate(pd_outputs):
                    if isinstance(out, np.ndarray):
                        print(
                            "Paddle's {}-th output is a np.ndarray, the shape is {}."
                            .format(i, out.shape))
            if args.log_level == 2:
                print("Output of Paddle: ", pd_outputs)
                print("Output of TensorFlow: ", tf_outputs)
            utils.check_outputs(pd_outputs,
                                tf_outputs,
                                name=config.api_name,
                                atol=config.atol,
                                use_gpu=args.use_gpu,
                                backward=pd_obj.backward,
                                config_params=config.to_string())
Beispiel #3
0
def test_main_without_json(pd_obj=None, tf_obj=None, config=None):
    """Run a Paddle/TensorFlow benchmark pair driven by command-line args.

    Args:
        pd_obj: Paddle benchmark object; required when Paddle is enabled.
        tf_obj: TensorFlow benchmark object; required when TF is enabled.
        config: config object describing the op under test; must be set.

    Raises:
        AssertionError: if ``config`` or a required framework object is None.
        SystemExit: if the Paddle run reports failure.
    """
    assert config is not None, "API config must be set."
    if config.disabled():
        warnings.simplefilter('always', UserWarning)
        warnings.warn("This config is disabled.")
        return

    args = parse_args()
    _adaptive_repeat(config, args)
    config.backward = args.backward
    # Only the accuracy task uses explicit feed/fetch handling.
    use_feed_fetch = args.task == "accuracy"

    feeder_adapter = None
    if _is_tensorflow_enabled(args, config):
        assert tf_obj is not None, "TensorFlow object is None."
        tf_config = config.to_tensorflow()
        print(tf_config)
        # Reused by the Paddle run below so both frameworks see the same data.
        feeder_adapter = tf_obj.generate_random_feeder(tf_config,
                                                       use_feed_fetch)
        tf_outputs, tf_stats = tf_obj.run(tf_config, args, use_feed_fetch,
                                          feeder_adapter)
        if args.task == "speed":
            tf_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(tf_stats,
                                         log_level=args.log_level,
                                         config_params=config.to_string())

    if _is_paddle_enabled(args, config):
        assert pd_obj is not None, "Paddle object is None."
        print(config)
        pd_outputs, pd_stats = pd_obj.run(config, args, use_feed_fetch,
                                          feeder_adapter)

        if args.task == "speed":
            pd_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(pd_stats,
                                         log_level=args.log_level,
                                         config_params=config.to_string())

        # pd_obj.run signals failure by returning False instead of outputs;
        # use an identity check so an ndarray output never hits `== False`.
        if pd_outputs is False:
            sys.exit(1)

    if args.task == "accuracy":
        if config.run_tf:
            utils.check_outputs(pd_outputs,
                                tf_outputs,
                                name=config.api_name,
                                atol=config.atol,
                                backward=pd_obj.backward,
                                config_params=config.to_string())
        else:
            warnings.simplefilter('always', UserWarning)
            warnings.warn("This config is not supported by TensorFlow.")
Beispiel #4
0
def run_and_check(pd_obj, tf_obj, backward, use_gpu, name):
    """Build, execute and cross-check a Paddle/TensorFlow operator pair."""
    # Build both programs first so the feeder can query their input specs.
    pd_obj.build_program(backward=backward)
    tf_obj.build_graph(backward=backward)

    # One shared random feed keeps the comparison apples-to-apples.
    paddle_feed, tensorflow_feed = feed_random_data(pd_obj, tf_obj)

    paddle_result = pd_obj.run_with_executor(
        use_gpu=use_gpu, feed=paddle_feed, check_output=False)
    tensorflow_result = tf_obj.run(
        use_gpu=use_gpu, feed=tensorflow_feed, check_output=False)

    utils.check_outputs(paddle_result, tensorflow_result, name=name)
Beispiel #5
0
def main(backward, use_gpu):
    """Compare Paddle and TensorFlow implementations of embedding lookup."""
    # Build the Paddle program.
    paddle_op = PaddleEmbedding()
    paddle_op.build_program(backward=backward)

    # Build the TensorFlow graph.
    tf_op = TensorflowEmbedding()
    tf_op.build_graph(backward=backward)

    # One shared random feed keeps the comparison apples-to-apples.
    paddle_feed, tf_feed = feed_random_data(paddle_op, tf_op)

    paddle_out = paddle_op.run_with_executor(
        use_gpu=use_gpu, feed=paddle_feed, check_output=False)
    tf_out = tf_op.run(use_gpu=use_gpu, feed=tf_feed, check_output=False)

    utils.check_outputs(paddle_out, tf_out, name="embedding")
Beispiel #6
0
def main(backward, use_gpu):
    """Compare Paddle and TensorFlow conv2d outputs (and gradients).

    Args:
        backward: whether to also build and compare the backward pass.
        use_gpu: whether to run both frameworks on GPU.
    """
    # Define Paddle program
    pd_obj = PaddleConv2d()
    pd_obj.build_program(backward=backward)

    # Define Tensorflow graph
    tf_obj = TensorflowConv2d()
    tf_obj.build_graph(backward=backward)

    pd_feed, tf_feed = feed_random_data(pd_obj, tf_obj)

    # Run Paddle
    pd_outputs = pd_obj.run_with_executor(use_gpu=use_gpu,
                                          feed=pd_feed,
                                          check_output=False)
    # Index 2 is only valid when at least THREE outputs exist; the original
    # `len(pd_outputs) >= 2` guard raised IndexError for exactly two outputs.
    if backward and len(pd_outputs) > 2:
        # Reorder axes (2, 3, 1, 0) so the third Paddle output matches
        # TensorFlow's layout before comparison — presumably the filter
        # gradient (NCHW filter -> HWIO); verify against the op definitions.
        pd_outputs[2] = np.transpose(pd_outputs[2], (2, 3, 1, 0))

    # Run Tensorflow
    tf_outputs = tf_obj.run(use_gpu=use_gpu, feed=tf_feed, check_output=False)

    utils.check_outputs(pd_outputs, tf_outputs, name="conv2d")
Beispiel #7
0
def test_main(pd_obj=None, tf_obj=None, feed_spec=None):
    """Drive a Paddle/TensorFlow test pair according to CLI arguments."""
    args = parse_args()

    feed_list = None
    want_paddle = args.task == "accuracy" or args.framework in ["paddle", "both"]
    want_tf = args.task == "accuracy" or args.framework in [
        "tensorflow", "tf", "both"
    ]

    if want_paddle:
        if pd_obj is None:
            raise ValueError("Paddle object is None.")
        pd_obj.build_program(backward=args.backward, dtype=args.dtype)
        feed_list = feeder.feed_paddle(pd_obj, feed_spec)
        pd_outputs = test_paddle(args.task, pd_obj, args, feed_list)

    if want_tf:
        if tf_obj is None:
            raise ValueError("TensorFlow object is None.")
        tf_obj.build_graph(backward=args.backward)
        # Reuse the Paddle feed (if any) so both frameworks see the same data.
        feed_list = feeder.feed_tensorflow(tf_obj, feed_list, feed_spec)
        tf_outputs = test_tensorflow(args.task, tf_obj, args, feed_list)

    if args.task == "accuracy":
        utils.check_outputs(pd_outputs, tf_outputs, name=pd_obj.name)
Beispiel #8
0
def test_main_without_json(pd_obj=None, tf_obj=None, config=None):
    """Run the Paddle/TensorFlow variants of an op and compare when required.

    Args:
        pd_obj: Paddle benchmark object; required when Paddle is enabled.
        tf_obj: TensorFlow benchmark object; required when TF is enabled.
        config: config object describing the op under test; must be set.

    Raises:
        AssertionError: if ``config`` or a required framework object is None.
    """
    assert config is not None, "API config must be set."

    args = parse_args()
    config.backward = args.backward
    feed_spec = copy_feed_spec(config)
    feed_list = None
    if _is_paddle_enabled(args, config):
        assert pd_obj is not None, "Paddle object is None."
        print(config)
        pd_obj.name = config.name
        pd_obj.create_program()
        pd_obj.build_program(config=config)
        feed_list = feeder.feed_paddle(pd_obj, feed_spec=feed_spec)
        pd_outputs = run_paddle(args.task, pd_obj, args, feed_list)

    if _is_tensorflow_enabled(args, config):
        assert tf_obj is not None, "TensorFlow object is None."
        tf_config = config.to_tensorflow()
        print(tf_config)
        warnings.simplefilter('always', UserWarning)
        tf_obj.name = tf_config.name
        tf_obj.build_graph(config=tf_config)
        feed_list = feeder.feed_tensorflow(tf_obj,
                                           feed_list,
                                           feed_spec=feed_spec)
        tf_outputs = run_tensorflow(args.task, tf_obj, args, feed_list)

    if args.task == "accuracy":
        # Use config.run_tf here: the original read tf_config.run_tf, but
        # tf_config is only bound when the TF branch above executed, which
        # would raise NameError for TF-disabled configs.
        if config.run_tf:
            utils.check_outputs(pd_outputs,
                                tf_outputs,
                                name=pd_obj.name,
                                atol=config.atol)
        else:
            warnings.simplefilter('always', UserWarning)
            warnings.warn("This config is not supported by TensorFlow.")
Beispiel #9
0
def test_main_without_json(pd_obj=None,
                           tf_obj=None,
                           pd_dy_obj=None,
                           torch_obj=None,
                           config=None):
    """Benchmark/compare an op across Paddle (static + dynamic), TF and PyTorch.

    Which frameworks actually run is decided by the parsed CLI arguments and
    the config; static-mode Paddle is compared against TensorFlow, dynamic-mode
    Paddle against PyTorch.

    Args:
        pd_obj: Paddle static-graph benchmark object.
        tf_obj: TensorFlow benchmark object.
        pd_dy_obj: Paddle dynamic-graph benchmark object.
        torch_obj: PyTorch benchmark object.
        config: config object describing the op under test; must be set.

    Raises:
        AssertionError: if ``config`` or a required framework object is None.
        SystemExit: if a Paddle run reports failure.
    """
    assert config is not None, "API config must be set."

    args = parse_args()
    # Some configs are explicitly disabled; bail out early.
    if _check_disabled(config, args):
        return

    _adaptive_repeat(config, args)
    config.backward = args.backward
    # Only the accuracy task uses explicit feed/fetch handling.
    use_feed_fetch = args.task == "accuracy"

    feeder_adapter = None
    if is_tensorflow_enabled(args, config):
        assert tf_obj is not None, "TensorFlow object is None."
        tf_config = config.to_tensorflow()
        print(tf_config)
        # Reused by the static Paddle run so both frameworks share data.
        feeder_adapter = tf_obj.generate_random_feeder(tf_config,
                                                       use_feed_fetch)
        tf_outputs, tf_stats = tf_obj.run(tf_config, args, use_feed_fetch,
                                          feeder_adapter)
        if args.task == "speed":
            tf_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(
                tf_stats,
                task=args.task,
                log_level=args.log_level,
                config_params=config.to_string())

    if is_paddle_enabled(args, config) and args.testing_mode == "static":
        assert pd_obj is not None, "Paddle object is None."
        print(config)
        pd_outputs, pd_stats = pd_obj.run(config, args, use_feed_fetch,
                                          feeder_adapter)

        if args.task == "speed":
            pd_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(
                pd_stats,
                task=args.task,
                log_level=args.log_level,
                config_params=config.to_string())

        # Failure is signalled by a literal False instead of an output list;
        # identity check avoids elementwise `== False` on ndarray outputs.
        if pd_outputs is False:
            sys.exit(1)

    if is_torch_enabled(args, config):
        assert torch_obj is not None, "PyTorch object is None."
        import torch
        try:
            # Mirror Paddle's cuDNN algorithm-search flag in PyTorch so both
            # frameworks pick kernels the same way.
            import paddle
            flags = paddle.get_flags(["FLAGS_cudnn_exhaustive_search"])
            torch.backends.cudnn.benchmark = flags[
                "FLAGS_cudnn_exhaustive_search"]
        except Exception:
            # NOTE(review): os.environ.get returns a *string* when the
            # variable is set; any non-empty string is truthy — confirm
            # this matches the intended flag semantics.
            torch.backends.cudnn.benchmark = os.environ.get(
                "FLAGS_cudnn_exhaustive_search", False)

        torch_config = config.to_pytorch()
        print(torch_config)
        torch_outputs, torch_stats = torch_obj.run(torch_config, args)
        # Reused by the dynamic Paddle run so both frameworks share data.
        feeder_adapter = torch_obj.generate_random_feeder(torch_config)

        if args.task == "speed":
            torch_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(
                torch_stats,
                task=args.task,
                log_level=args.log_level,
                config_params=config.to_string())

    if is_paddle_enabled(args, config) and args.testing_mode == "dynamic":
        assert pd_dy_obj is not None, "Paddle dynamic object is None."
        print(config)
        pd_dy_outputs, pd_dy_stats = pd_dy_obj.run(
            config, args, feeder_adapter=feeder_adapter)

        if args.task in ["speed", "scheduling"]:
            if args.task == "speed":
                pd_dy_stats["gpu_time"] = args.gpu_time
            if args.task == "scheduling":
                pd_dy_stats["scheduling_times"] = args.scheduling_times
            utils.print_benchmark_result(
                pd_dy_stats,
                task=args.task,
                log_level=args.log_level,
                config_params=config.to_string())

        # Identity check for the False failure sentinel (see above).
        if pd_dy_outputs is False:
            sys.exit(1)

    if args.task == "accuracy":
        # Static mode compares Paddle vs TF; dynamic mode Paddle vs PyTorch.
        is_run_tf = config.run_tf and args.testing_mode == "static"
        is_run_torch = config.run_torch and args.testing_mode == "dynamic"
        if is_run_tf:
            base_outputs = pd_outputs
            compare_outputs = tf_outputs
            backward = pd_obj.backward
        elif is_run_torch:
            base_outputs = pd_dy_outputs
            compare_outputs = torch_outputs
            backward = pd_dy_obj.backward

        if is_run_tf or is_run_torch:
            if args.log_level == 1:
                for i, out in enumerate(base_outputs):
                    if isinstance(out, np.ndarray):
                        print(
                            "Paddle's {}-th output is a np.ndarray, the shape is {}.".
                            format(i, out.shape))
            if args.log_level == 2:
                print("Output of Paddle: ", base_outputs)
                print("Output of TensorFlow: ", compare_outputs)
            utils.check_outputs(
                base_outputs,
                compare_outputs,
                args.testing_mode,
                name=config.api_name,
                atol=config.atol,
                use_gpu=args.use_gpu,
                backward=backward,
                config_params=config.to_string())