Example #1
def test_main_without_json(pd_obj=None, tf_obj=None, config=None):
    """Run a Paddle-vs-TensorFlow API test driven by command-line args.

    Depending on ``args.task`` this either benchmarks the op ("speed") or
    compares Paddle and TensorFlow outputs ("accuracy").

    Args:
        pd_obj: Paddle API test object; required when Paddle is enabled.
        tf_obj: TensorFlow API test object; required when TensorFlow is enabled.
        config: API configuration under test; must not be None.

    Exits the process with status 1 if the Paddle run reports failure.
    """
    assert config is not None, "API config must be set."

    args = parse_args()
    if _check_disabled(config, args):
        return

    _adaptive_repeat(config, args)
    config.backward = args.backward
    # Feed/fetch is only needed when outputs are compared for accuracy.
    use_feed_fetch = args.task == "accuracy"

    feeder_adapter = None
    if _is_tensorflow_enabled(args, config):
        assert tf_obj is not None, "TensorFlow object is None."
        tf_config = config.to_tensorflow()
        print(tf_config)
        feeder_adapter = tf_obj.generate_random_feeder(tf_config,
                                                       use_feed_fetch)
        tf_outputs, tf_stats = tf_obj.run(tf_config, args, use_feed_fetch,
                                          feeder_adapter)
        if args.task == "speed":
            tf_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(tf_stats,
                                         log_level=args.log_level,
                                         config_params=config.to_string())

    if _is_paddle_enabled(args, config):
        assert pd_obj is not None, "Paddle object is None."
        print(config)
        # Reuse the TensorFlow feeder (if one was built) so both frameworks
        # consume the same random inputs.
        pd_outputs, pd_stats = pd_obj.run(config, args, use_feed_fetch,
                                          feeder_adapter)

        if args.task == "speed":
            pd_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(pd_stats,
                                         log_level=args.log_level,
                                         config_params=config.to_string())

        # pd_obj.run returns the sentinel False (instead of outputs) on
        # failure; identity check avoids numpy elementwise `==` surprises.
        if pd_outputs is False:
            sys.exit(1)

    if args.task == "accuracy":
        if config.run_tf:
            if args.log_level == 1:
                for i, out in enumerate(pd_outputs):
                    if isinstance(out, np.ndarray):
                        print(
                            "Paddle's {}-th output is a np.ndarray, the shape is {}."
                            .format(i, out.shape))
            if args.log_level == 2:
                print("Output of Paddle: ", pd_outputs)
                print("Output of TensorFlow: ", tf_outputs)
            utils.check_outputs(pd_outputs,
                                tf_outputs,
                                name=config.api_name,
                                atol=config.atol,
                                use_gpu=args.use_gpu,
                                backward=pd_obj.backward,
                                config_params=config.to_string())
Example #2
def test_main_without_json(pd_obj=None, tf_obj=None, config=None):
    """Run a Paddle-vs-TensorFlow API test driven by command-line args.

    Skips disabled configs with a warning; otherwise benchmarks ("speed")
    or compares outputs between frameworks ("accuracy").

    Args:
        pd_obj: Paddle API test object; required when Paddle is enabled.
        tf_obj: TensorFlow API test object; required when TensorFlow is enabled.
        config: API configuration under test; must not be None.

    Exits the process with status 1 if the Paddle run reports failure.
    """
    assert config is not None, "API config must be set."
    if config.disabled():
        warnings.simplefilter('always', UserWarning)
        warnings.warn("This config is disabled.")
        return

    args = parse_args()
    _adaptive_repeat(config, args)
    config.backward = args.backward
    # Feed/fetch is only needed when outputs are compared for accuracy.
    use_feed_fetch = args.task == "accuracy"

    feeder_adapter = None
    if _is_tensorflow_enabled(args, config):
        assert tf_obj is not None, "TensorFlow object is None."
        tf_config = config.to_tensorflow()
        print(tf_config)
        feeder_adapter = tf_obj.generate_random_feeder(tf_config,
                                                       use_feed_fetch)
        tf_outputs, tf_stats = tf_obj.run(tf_config, args, use_feed_fetch,
                                          feeder_adapter)
        if args.task == "speed":
            tf_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(tf_stats,
                                         log_level=args.log_level,
                                         config_params=config.to_string())

    if _is_paddle_enabled(args, config):
        assert pd_obj is not None, "Paddle object is None."
        print(config)
        # Reuse the TensorFlow feeder (if one was built) so both frameworks
        # consume the same random inputs.
        pd_outputs, pd_stats = pd_obj.run(config, args, use_feed_fetch,
                                          feeder_adapter)

        if args.task == "speed":
            pd_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(pd_stats,
                                         log_level=args.log_level,
                                         config_params=config.to_string())

        # pd_obj.run returns the sentinel False (instead of outputs) on
        # failure; identity check avoids numpy elementwise `==` surprises.
        if pd_outputs is False:
            sys.exit(1)

    if args.task == "accuracy":
        if config.run_tf:
            utils.check_outputs(pd_outputs,
                                tf_outputs,
                                name=config.api_name,
                                atol=config.atol,
                                backward=pd_obj.backward,
                                config_params=config.to_string())
        else:
            warnings.simplefilter('always', UserWarning)
            warnings.warn("This config is not supported by TensorFlow.")
Example #3
def test_main_without_json(pd_obj=None,
                           tf_obj=None,
                           pd_dy_obj=None,
                           torch_obj=None,
                           config=None):
    """Run an API test comparing Paddle against TensorFlow or PyTorch.

    In "static" testing mode, Paddle (static graph) is compared against
    TensorFlow; in "dynamic" mode, Paddle (dygraph) is compared against
    PyTorch. Supported tasks: "speed", "scheduling", and "accuracy".

    Args:
        pd_obj: Paddle static-graph test object; required in static mode.
        tf_obj: TensorFlow test object; required when TensorFlow is enabled.
        pd_dy_obj: Paddle dygraph test object; required in dynamic mode.
        torch_obj: PyTorch test object; required when PyTorch is enabled.
        config: API configuration under test; must not be None.

    Exits the process with status 1 if a Paddle run reports failure.
    """
    assert config is not None, "API config must be set."

    args = parse_args()
    if _check_disabled(config, args):
        return

    _adaptive_repeat(config, args)
    config.backward = args.backward
    # Feed/fetch is only needed when outputs are compared for accuracy.
    use_feed_fetch = args.task == "accuracy"

    feeder_adapter = None
    if is_tensorflow_enabled(args, config):
        assert tf_obj is not None, "TensorFlow object is None."
        tf_config = config.to_tensorflow()
        print(tf_config)
        feeder_adapter = tf_obj.generate_random_feeder(tf_config,
                                                       use_feed_fetch)
        tf_outputs, tf_stats = tf_obj.run(tf_config, args, use_feed_fetch,
                                          feeder_adapter)
        if args.task == "speed":
            tf_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(
                tf_stats,
                task=args.task,
                log_level=args.log_level,
                config_params=config.to_string())

    if is_paddle_enabled(args, config) and args.testing_mode == "static":
        assert pd_obj is not None, "Paddle object is None."
        print(config)
        # Reuse the TensorFlow feeder (if one was built) so both frameworks
        # consume the same random inputs.
        pd_outputs, pd_stats = pd_obj.run(config, args, use_feed_fetch,
                                          feeder_adapter)

        if args.task == "speed":
            pd_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(
                pd_stats,
                task=args.task,
                log_level=args.log_level,
                config_params=config.to_string())

        # pd_obj.run returns the sentinel False (instead of outputs) on
        # failure; identity check avoids numpy elementwise `==` surprises.
        if pd_outputs is False:
            sys.exit(1)

    if is_torch_enabled(args, config):
        assert torch_obj is not None, "PyTorch object is None."
        import torch
        # Mirror Paddle's cuDNN exhaustive-search flag in PyTorch so both
        # frameworks pick kernels the same way; fall back to the environment
        # variable when Paddle is unavailable.
        try:
            import paddle
            flags = paddle.get_flags(["FLAGS_cudnn_exhaustive_search"])
            torch.backends.cudnn.benchmark = flags[
                "FLAGS_cudnn_exhaustive_search"]
        except Exception:
            torch.backends.cudnn.benchmark = os.environ.get(
                "FLAGS_cudnn_exhaustive_search", False)

        torch_config = config.to_pytorch()
        print(torch_config)
        torch_outputs, torch_stats = torch_obj.run(torch_config, args)
        # NOTE(review): the feeder is generated after run(); presumably
        # run() seeds/populates it internally — confirm against torch_obj.
        feeder_adapter = torch_obj.generate_random_feeder(torch_config)

        if args.task == "speed":
            torch_stats["gpu_time"] = args.gpu_time
            utils.print_benchmark_result(
                torch_stats,
                task=args.task,
                log_level=args.log_level,
                config_params=config.to_string())

    if is_paddle_enabled(args, config) and args.testing_mode == "dynamic":
        assert pd_dy_obj is not None, "Paddle dynamic object is None."
        print(config)
        pd_dy_outputs, pd_dy_stats = pd_dy_obj.run(
            config, args, feeder_adapter=feeder_adapter)

        if args.task in ["speed", "scheduling"]:
            if args.task == "speed":
                pd_dy_stats["gpu_time"] = args.gpu_time
            if args.task == "scheduling":
                pd_dy_stats["scheduling_times"] = args.scheduling_times
            utils.print_benchmark_result(
                pd_dy_stats,
                task=args.task,
                log_level=args.log_level,
                config_params=config.to_string())

        # Same failure-sentinel convention as the static-mode run.
        if pd_dy_outputs is False:
            sys.exit(1)

    if args.task == "accuracy":
        # Pick the baseline (Paddle) and reference outputs according to the
        # testing mode: static -> TensorFlow, dynamic -> PyTorch.
        is_run_tf = config.run_tf and args.testing_mode == "static"
        is_run_torch = config.run_torch and args.testing_mode == "dynamic"
        if is_run_tf:
            base_outputs = pd_outputs
            compare_outputs = tf_outputs
            backward = pd_obj.backward
            compare_name = "TensorFlow"
        elif is_run_torch:
            base_outputs = pd_dy_outputs
            compare_outputs = torch_outputs
            backward = pd_dy_obj.backward
            compare_name = "PyTorch"

        if is_run_tf or is_run_torch:
            if args.log_level == 1:
                for i, out in enumerate(base_outputs):
                    if isinstance(out, np.ndarray):
                        print(
                            "Paddle's {}-th output is a np.ndarray, the shape is {}.".
                            format(i, out.shape))
            if args.log_level == 2:
                print("Output of Paddle: ", base_outputs)
                # Fix: label the reference framework correctly (the original
                # always printed "TensorFlow", even when comparing PyTorch).
                print("Output of {}: ".format(compare_name), compare_outputs)
            utils.check_outputs(
                base_outputs,
                compare_outputs,
                args.testing_mode,
                name=config.api_name,
                atol=config.atol,
                use_gpu=args.use_gpu,
                backward=backward,
                config_params=config.to_string())