예제 #1
0
def optimize_for_inference(args, outputs):
    """Apply inference-time graph optimizations selected via CLI flags.

    Each ``enable_*`` attribute on *args* maps to a keyword argument of
    ``G.optimize_for_inference``; any enabled flag additionally requires
    ``args.optimize_for_inference`` to be set (AssertionError otherwise).
    Returns the (possibly rewritten) list of output nodes.
    """
    flag_to_kwarg = {
        "enable_io16xc32": "f16_io_f32_comp",
        "enable_ioc16": "f16_io_comp",
        "enable_hwcd4": "use_nhwcd4",
        "enable_nchw4": "use_nchw4",
        "enable_nchw88": "use_nchw88",
        "enable_nchw44": "use_nchw44",
        "enable_nchw44_dot": "use_nchw44_dot",
        "enable_nchw32": "use_nchw32",
        "enable_chwn4": "use_chwn4",
        "enable_fuse_conv_bias_nonlinearity": "fuse_conv_bias_nonlinearity",
        "enable_fuse_conv_bias_with_z": "fuse_conv_bias_with_z",
    }

    kwargs = {}
    for flag_name, kwarg_name in flag_to_kwarg.items():
        if not getattr(args, flag_name):
            continue
        # every per-layout flag is meaningless unless the master switch is on
        assert (
            args.optimize_for_inference
        ), "optimize_for_inference should be set when {} is given".format(
            flag_name)
        kwargs[kwarg_name] = True

    if args.optimize_for_inference:
        optimized = G.optimize_for_inference(outputs, **kwargs)
        outputs = [var._node for var in optimized]

    return outputs
예제 #2
0
def optimize_for_inference(args, outputs):
    """Run ``G.optimize_for_inference`` on *outputs* when requested.

    Every truthy ``enable_*`` attribute of *args* is forwarded as a
    ``True`` keyword argument; when ``args.optimize_for_inference`` is
    falsy the outputs are returned untouched.
    """
    flag_names = (
        "enable_io16xc32",
        "enable_ioc16",
        "enable_hwcd4",
        "enable_nchw4",
        "enable_nchw88",
        "enable_nchw44",
        "enable_nchw44_dot",
        "enable_nchw32",
        "enable_chwn4",
        "enable_fuse_conv_bias_nonlinearity",
        "enable_fuse_conv_bias_with_z",
    )
    # collect only the flags that were actually switched on
    kwargs = {name: True for name in flag_names if getattr(args, name)}

    if args.optimize_for_inference:
        outputs = G.optimize_for_inference(outputs, **kwargs)

    return outputs
예제 #3
0
def run_model(args, graph, inputs, outputs, data):
    """Compile the graph, run it ``args.iter`` times and log timing stats.

    Parameters:
        args: parsed CLI namespace (iter, warm_up, profile, embed_input,
            dump_cpp_model, optimize_for_inference, calc_output_rms,
            focused_nvprof, ...).
        graph: computing graph to compile.
        inputs / outputs: graph input and output nodes.
        data: dict mapping input names to numpy arrays fed each iteration.

    Returns:
        Average speed of the last iteration window, in samples/s when a
        "data" entry exists (scaled by its batch size), else batches/s.
        Returns 0.0 when ``args.iter`` is 0.
    """
    # must use level0 to avoid unintended opr modification
    graph.options.graph_opt_level = 0

    logger.info("input tensors: ")
    for k, v in data.items():
        logger.info("  {}: {}".format(k, v.shape))

    G.modify_opr_algo_strategy_inplace(outputs, get_execution_strategy(args))

    if args.optimize_for_inference:
        opt_kwargs = get_opt_kwargs(args)
        outputs = G.optimize_for_inference(outputs, **opt_kwargs)

    # embed inputs must be on the last, to avoid const fold
    if args.embed_input:
        outputs, inp_dict = tools.embed_inputs(outputs, data.values(), inputs=inputs)
    else:
        outputs, inp_dict = tools.convert_inputs(outputs, inputs=inputs)

    if args.dump_cpp_model:
        dump_content, _ = G.dump_graph(outputs, keep_var_name=2)
        with open(args.dump_cpp_model, "wb") as file:
            file.write(dump_content)
        logger.info("C++ model written to {}".format(args.dump_cpp_model))

    outputs, output_dict = tools.convert_outputs(outputs)

    if args.profile:
        profiler = tools.GraphProfiler(graph)

    func = graph.compile(outputs)

    def run():
        # refresh input values every call unless they were embedded as consts
        if not args.embed_input:
            for key in inp_dict:
                inp_dict[key].set_value(mge.Tensor(data[key])._dev_tensor())
        func.execute()
        func.wait()
        return [oup_node.get_value().numpy() for oup_node in output_dict.values()]

    if args.warm_up:
        logger.info("warming up")
        run()

    total_time = 0
    # BUGFIX: initialize so the final `return avg_speed` cannot raise
    # NameError when args.iter == 0 (previously only assigned in the loop).
    avg_speed = 0.0

    for i in range(args.iter):
        logger.info("iter {}".format(i))
        start_time = time.time()
        retval = run()
        cur_time = time.time() - start_time
        total_time += cur_time

        avg_speed = (i + 1) / total_time
        if "data" in data:
            # scale by batch size so the figure is per-sample, not per-batch
            avg_speed *= data["data"].shape[0]
            avg_speed_txt = "{:.3f}sample/s".format(avg_speed)
        else:
            avg_speed_txt = "{:.3f}batch/s".format(avg_speed)

        msg = (
            "iter {}: duration={:.4f}({:.4f})s average={:.4f}s "
            "avg_speed={} time={:.4f}s"
        ).format(
            i,
            cur_time,
            func.get_prev_exec_time(),
            total_time / (i + 1),
            avg_speed_txt,
            total_time,
        )
        if args.calc_output_rms:
            rms = []
            for v in retval:
                rms.append("{:.3g}".format(float(((v ** 2).mean()) ** 0.5)))
            msg += " output_rms=[{}]".format(", ".join(rms))
        if logger.level > logging.INFO:
            # logger is set quieter than INFO; print so the stats still show
            print(msg)
        else:
            logger.info(msg)

    if args.focused_nvprof:
        if get_device_count("gpu") < 1:
            logger.warning(
                "No cuda device detected. ``focused_nvprof`` will be ignored."
            )
        else:
            try:
                import pycuda.driver as D

                D.start_profiler()
                func.execute()
                func.wait()
                D.stop_profiler()
            except ImportError:
                logger.error("`focused_nvprof need pycuda`", exc_info=True)

    if args.profile:
        with open(args.profile, "w") as fout:
            fout.write(profiler.get())

    return avg_speed