Example #1
import logging
import traceback

def _op_build_ascend(opnames, computes, args, custom_schedule, device,
                     kernel_name, attrs):
    """Build an op for an Ascend device: lower to a function, then to a module."""
    tmp_rst = op_build_to_func(opnames, computes, args, custom_schedule,
                               device, kernel_name, attrs)
    if tmp_rst is not None:
        try:
            # _api_internal and _get_target come from the surrounding module.
            _api_internal._BuildToModule(tmp_rst, _get_target(device))
        except Exception:
            logging.error(traceback.format_exc())
            return None  # None signals failure to the caller
    return True
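
The True/None return convention means callers must test for failure explicitly. A minimal hypothetical caller (the first four arguments are placeholders for values produced elsewhere):

result = _op_build_ascend(opnames, computes, args, custom_schedule,
                          "aicore", "my_kernel", {})
if result is None:
    raise RuntimeError("Ascend kernel build failed")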
Example #2
def build(inputs, args, target=None, shape_params=None, name="default_function",
          binds=None, attrs=None, polyhedral=False, aicpu=False):
    """Lower `inputs` to a function, then build it into a module for `target`."""
    tmp_rst = build_to_func(inputs, args, shape_params=shape_params, name=name, binds=binds,
                            attrs=attrs, polyhedral=polyhedral, aicpu=aicpu)

    # Default to the 'cce' (Ascend) target when none is given.
    tmp_target = target if target is not None else 'cce'
    return _api_internal._BuildToModule(tmp_rst, tmp_target)
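
If akg.tvm mirrors the classic TVM compute/schedule DSL and `inputs` accepts a schedule (both are assumptions; the snippet does not show how `inputs` is produced), a call might look like:

import akg.tvm as tvm

# Hypothetical usage sketch, not taken from the source above.
A = tvm.placeholder((1024,), name="A")
B = tvm.compute((1024,), lambda i: A[i] + 1, name="B")
s = tvm.create_schedule(B.op)
mod = build(s, [A, B], name="add_one")  # target defaults to 'cce'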
Example #3
import fcntl
import logging
import os
import traceback
import types
import typing

import akg.ms.gpu
import akg.tvm

def op_build(opnames, computes, args, custom_schedule, device, kernel_name,
             attrs):
    """Build an op for the given device ('aicore', 'aicpu' or 'cuda')."""
    if device in ("aicore", "aicpu"):
        tmp_rst = op_build_to_func(opnames, computes, args, custom_schedule,
                                   device, kernel_name, attrs)
        return _api_internal._BuildToModule(tmp_rst)

    if device == "cuda":
        # MS_CUDA_KERNEL_PATH and gpu_utils are defined in the surrounding module.
        cuda_path = os.path.realpath(MS_CUDA_KERNEL_PATH)
        if not os.path.isdir(cuda_path):
            os.makedirs(cuda_path)
        if not opnames:
            logging.error("no opname given.")
            return None

        # Look up the schedule function for this op by its conventional name.
        # A default of None makes the error branch below reachable instead of
        # letting getattr raise AttributeError.
        schedule_name = 'gpu_schedule_' + opnames[0]
        schedule_func = getattr(akg.ms.gpu, schedule_name, None)
        if not isinstance(schedule_func,
                          (types.FunctionType, typing.Callable)):
            logging.error("no schedule func found %s", str(schedule_name))
            return None

        # Rebuild the PTX from scratch; racing processes coordinate via flock.
        ptx_file = os.path.realpath(MS_CUDA_KERNEL_PATH + kernel_name + ".ptx")
        if os.path.exists(ptx_file):
            os.remove(ptx_file)
        try:
            with open(ptx_file, 'at') as file:
                fcntl.flock(file.fileno(), fcntl.LOCK_EX)
                file.seek(0, os.SEEK_END)
                if file.tell() == 0:
                    # First writer: compile, dump the PTX and the kernel params.
                    s = schedule_func(computes)
                    foo = akg.tvm.build(s, args, device, name=kernel_name)
                    ptx_code = foo.imported_modules[0].get_source("ptx")
                    file.write(ptx_code)
                    json_file = os.path.realpath(MS_CUDA_KERNEL_PATH +
                                                 kernel_name + ".json")
                    kernel_info = (ptx_code, json_file, kernel_name)
                    gpu_utils.save_gpu_params(s, args, kernel_info)
            os.chmod(ptx_file, 0o400)  # make the cached kernel read-only
        except Exception:
            logging.error(traceback.format_exc())
            return None
        return True

    logging.error("Unsupported device %s.", device)
    return None
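
The open-append / flock / tell() == 0 dance above is a write-once-under-lock pattern: whichever racing process takes the exclusive lock first writes the file, and later lock holders find it non-empty and skip the work. Isolated as a standalone sketch (the names here are ours, not AKG's):

import fcntl
import os

def write_once(path, produce):
    """Write produce() to path exactly once across racing processes."""
    with open(path, "at") as f:
        fcntl.flock(f.fileno(), fcntl.LOCK_EX)  # block until we hold the lock
        f.seek(0, os.SEEK_END)
        if f.tell() == 0:                       # file still empty: we are first
            f.write(produce())
        # the lock is released when the file is closed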
Example #4
def compilewithjson(json_str):
    """Compile a kernel described by a JSON string."""
    tmp_rst = compilewithjson_to_func(json_str)
    if isinstance(tmp_rst, bool):
        # A bool is a pass/fail result from the lowering step; pass it through.
        return tmp_rst
    return _api_internal._BuildToModule(tmp_rst)
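
Because compilewithjson_to_func may short-circuit with a bool, the result is either a pass/fail flag or a built module, and a caller has to handle both. A hypothetical check (json_str holds a kernel description whose schema is not shown here):

result = compilewithjson(json_str)
if result is False:
    raise RuntimeError("compilation from JSON failed")
# otherwise result is True or the built module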
Example #5
def _build(desc_s, desc_d, attr=None):
    """Build from a composite-op JSON description (raw string plus parsed dict)."""
    if desc_d['process'] == 'cuda':
        # The CUDA path is handled entirely by a registered TVM global function.
        func = tvm.get_global_func("composite_with_json")
        return func(desc_s, attr)
    rst = _build_to_func(desc_s, desc_d, attr)
    return _api_internal._BuildToModule(rst)
Example #6
def _build(desc_s, desc_d, attrs=None, poly=False, use_repo=True):
    """Variant of _build that dispatches on desc_d['process']."""
    if desc_d['process'] == 'cuda':
        return _build_to_gpu_func(desc_s, desc_d, attrs, poly)
    rst = _build_to_func(desc_s, desc_d, attrs, use_repo)
    return _api_internal._BuildToModule(rst)
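
Both _build variants take the description twice, as the raw JSON string desc_s and its parsed dict desc_d, and dispatch on desc_d['process']. A hypothetical wrapper that derives one from the other:

import json

def build_from_json(desc_s, attrs=None, poly=False):
    # Hypothetical helper (not part of the snippets above): parse once,
    # then dispatch via _build; 'process' selects the cuda vs. default path.
    desc_d = json.loads(desc_s)
    return _build(desc_s, desc_d, attrs=attrs, poly=poly)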