Example 1
def match_ethosu_conv2d(output_tensor):
    """Match a Tensor Expression corresponding to an NPU Conv2D.

    If the Tensor Expression matches, an EthosuPart will be created that models the
    matched Tensor Expression. Otherwise, None will be returned.

    Parameters
    ----------
    output_tensor : tvm.te.Tensor
        The tensor to attempt to match with.

    Returns
    -------
    Union[None, EthosuPart]
        The created EthosuPart if there was a match, otherwise None.

    """
    write = output_tensor
    if write.op.name != "ethosu_write":
        return None
    convert_to_nhcwb16 = write.op.input_tensors[0]
    if convert_to_nhcwb16.op.name != "ethosu_convert_to_nhcwb16":
        return None
    conv2d = convert_to_nhcwb16.op.input_tensors[0]
    if conv2d.op.name != "ethosu_conv2d":
        return None
    pad = conv2d.op.input_tensors[0]
    if pad.op.name != "ethosu_pad":
        return None
    convert_to_nhwc = pad.op.input_tensors[0]
    if convert_to_nhwc.op.name != "ethosu_convert_to_nhwc":
        return None
    read = convert_to_nhwc.op.input_tensors[0]
    if read.op.name != "ethosu_read":
        return None

    input_tensors = [
        read.op.input_tensors[0],
        conv2d.op.input_tensors[1],
        conv2d.op.input_tensors[2],
    ]
    subgraph = TESubgraph(input_tensors, output_tensor)
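    # One propagator per input tensor (IFM, weights, bias), taken from the write op's attributes.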
    propagators = [
        write.op.attrs["ifm_propagator"],
        write.op.attrs["weights_propagator"],
        write.op.attrs["bias_propagator"],
    ]
    # TODO(@jacobbohlin) Both the output_quantum and quantum_cycles here are placeholders
    # and need a proper implementation.
    if convert_to_nhcwb16.op.attrs["layout"] == "NHWC":
        output_quantum = [1, 2, 2, 1]
    else:
        output_quantum = [1, 2, 1, 2, 1]
    quantum_cycles = 1000
    return EthosuPart(subgraph, propagators, output_quantum, quantum_cycles)
Example 2
def match_ethosu_unary_elementwise(output_tensor, device_config):
    """Match a Tensor Expression corresponding to an NPU Unary Elementwise.

    If the Tensor Expression matches, an EthosuPart will be created that models the
    matched Tensor Expression. Otherwise, None will be returned.

    Parameters
    ----------
    output_tensor : tvm.te.Tensor
        The tensor to attempt to match with.
    device_config : EthosuDeviceConfig
        Target device configuration

    Returns
    -------
    Union[None, EthosuPart]
        The created EthosuPart if there was a match, otherwise None.

    """
    write = output_tensor
    if write.op.name != "ethosu_write":
        return None
    convert_to_nhcwb16 = write.op.input_tensors[0]
    if convert_to_nhcwb16.op.name != "ethosu_convert_to_nhcwb16":
        return None
    unary_elementwise = convert_to_nhcwb16.op.input_tensors[0]
    if unary_elementwise.op.name != "ethosu_unary_elementwise":
        return None
    pad = unary_elementwise.op.input_tensors[0]
    if pad.op.name != "ethosu_pad":
        return None
    convert_to_nhwc = pad.op.input_tensors[0]
    if convert_to_nhwc.op.name != "ethosu_convert_to_nhwc":
        return None
    read = convert_to_nhwc.op.input_tensors[0]
    if read.op.name != "ethosu_read":
        return None

    input_tensors = [
        read.op.input_tensors[0],
    ]
    subgraph = TESubgraph(input_tensors, output_tensor)
    propagators = [
        write.op.attrs["ifm_propagator"],
    ]
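    # Unary elementwise has a single input feature map, hence a single propagator;
    # the second-IFM propagator and layout slots in the block config call below are None.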
    ifm_dtype = input_tensors[0].dtype
    ofm_dtype = output_tensor.dtype

    output_layout = convert_to_nhcwb16.op.attrs["layout"]
    input_layout = convert_to_nhwc.op.attrs["layout"]
    output_quantum = device_config.get_output_quantum(output_layout)

    block_config = device_config.get_elementwise_block_config(
        propagators[0],
        None,
        unary_elementwise.op.attrs,
        output_tensor.shape,
        output_layout,
        input_layout,
        None,
        ifm_dtype,
        ofm_dtype,
    )

    return EthosuPart(
        subgraph,
        propagators,
        output_quantum,
        1,
        block_config,
    )
Example 3
def match_ethosu_conv2d(output_tensor, device_config):
    """Match a Tensor Expression corresponding to an NPU Conv2D.

    If the Tensor Expression matches, an EthosuPart will be created that models the
    matched Tensor Expression. Otherwise, None will be returned.

    Parameters
    ----------
    output_tensor : tvm.te.Tensor
        The tensor to attempt to match with.
    device_config : EthosuDeviceConfig
        Target device configuration

    Returns
    -------
    Union[None, EthosuPart]
        The created EthosuPart if there was a match, otherwise None.

    """
    write = output_tensor
    if write.op.name != "ethosu_write":
        return None
    convert_to_nhcwb16 = write.op.input_tensors[0]
    if convert_to_nhcwb16.op.name != "ethosu_convert_to_nhcwb16":
        return None
    conv2d = convert_to_nhcwb16.op.input_tensors[0]
    if conv2d.op.name != "ethosu_conv2d":
        return None
    pad = conv2d.op.input_tensors[0]
    if pad.op.name != "ethosu_pad":
        return None
    upscale = pad.op.input_tensors[0]
    if upscale.op.name != "ethosu_upscale":
        return None
    convert_to_nhwc = upscale.op.input_tensors[0]
    if convert_to_nhwc.op.name != "ethosu_convert_to_nhwc":
        return None
    read = convert_to_nhwc.op.input_tensors[0]
    if read.op.name != "ethosu_read":
        return None

    input_tensors = [
        read.op.input_tensors[0],
        conv2d.op.input_tensors[1],
        conv2d.op.input_tensors[2],
    ]

    subgraph = TESubgraph(input_tensors, output_tensor)
    propagators = [
        write.op.attrs["ifm_propagator"],
        write.op.attrs["weights_propagator"],
        write.op.attrs["bias_propagator"],
    ]
    ifm_dtype = input_tensors[0].dtype
    ofm_dtype = output_tensor.dtype

    ifm_channels = int(input_tensors[0].shape[3])
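    # The weights tensor is assumed to be in OHWI layout, so its first three axes give
    # the OFM channel count and the kernel height and width.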
    ofm_channels, kernel_height, kernel_width = (
        int(axis) for axis in input_tensors[1].shape[0:3]
    )
    kernel_elements = kernel_height * kernel_width

    is_part_kernel = device_config.is_partkernel(
        conv2d.op.name, ifm_channels, ifm_dtype, kernel_elements
    )
    subkernels = len(
        device_config.get_kernel_steps(
            conv2d.op.name, kernel_height, kernel_width, ifm_dtype, is_part_kernel
        )
    )

    output_layout = convert_to_nhcwb16.op.attrs["layout"]
    input_layout = convert_to_nhwc.op.attrs["layout"]
    output_quantum = device_config.get_output_quantum(output_layout)

    valid_block_configs = device_config.get_valid_block_configs(
        propagators[0],
        conv2d.op.attrs,
        output_tensor.shape,
        ofm_channels,
        ifm_channels,
        output_layout,
        input_layout,
        ifm_dtype,
        ofm_dtype,
        kernel_height,
        kernel_width,
    )

    return EthosuPart(
        subgraph,
        propagators,
        output_quantum,
        subkernels,
        valid_block_configs,
        1,
    )
Example 4
def match_ethosu_pooling(output_tensor, device_config):
    """Match a Tensor Expression corresponding to an NPU Pooling.

    If the Tensor Expression matches, an EthosuPart will be created that models the
    matched Tensor Expression. Otherwise, None will be returned.

    Parameters
    ----------
    output_tensor : tvm.te.Tensor
        The tensor to attempt to match with.
    device_config : EthosuDeviceConfig
        Target device configuration

    Returns
    -------
    Union[None, EthosuPart]
        The created EthosuPart if there was a match, otherwise None.

    """
    write = output_tensor
    if write.op.name != "ethosu_write":
        return None
    convert_to_nhcwb16 = write.op.input_tensors[0]
    if convert_to_nhcwb16.op.name != "ethosu_convert_to_nhcwb16":
        return None
    pool2d = convert_to_nhcwb16.op.input_tensors[0]
    if pool2d.op.name != "ethosu_pooling":
        return None
    pad = pool2d.op.input_tensors[0]
    if pad.op.name != "ethosu_pad":
        return None
    convert_to_nhwc = pad.op.input_tensors[0]
    if convert_to_nhwc.op.name != "ethosu_convert_to_nhwc":
        return None
    read = convert_to_nhwc.op.input_tensors[0]
    if read.op.name != "ethosu_read":
        return None

    input_tensors = [
        read.op.input_tensors[0],
    ]
    subgraph = TESubgraph(input_tensors, output_tensor)
    propagators = [
        write.op.attrs["ifm_propagator"],
    ]
    ifm_dtype = input_tensors[0].dtype
    ofm_dtype = output_tensor.dtype

    ifm_channels = int(input_tensors[0].shape[3])
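    # Pooling preserves the channel dimension, so the OFM channel count equals the IFM's.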
    ofm_channels = ifm_channels
    pool_shape_h = int(pool2d.op.attrs["pool_shape_h"])
    pool_shape_w = int(pool2d.op.attrs["pool_shape_w"])

    subkernels = len(
        device_config.get_kernel_steps(pool2d.op.name, pool_shape_h, pool_shape_w, ifm_dtype)
    )

    output_layout = convert_to_nhcwb16.op.attrs["layout"]
    input_layout = convert_to_nhwc.op.attrs["layout"]
    output_quantum = device_config.get_output_quantum(output_layout)

    valid_block_configs = device_config.get_valid_block_configs(
        propagators[0],
        pool2d.op.attrs,
        output_tensor.shape,
        ofm_channels,
        ifm_channels,
        output_layout,
        input_layout,
        ifm_dtype,
        ofm_dtype,
        pool_shape_h,
        pool_shape_w,
    )

    return EthosuPart(
        subgraph,
        propagators,
        output_quantum,
        subkernels,
        valid_block_configs,
    )
Example 5
def match_ethosu_identity(output_tensor, device_config):
    """Match a Tensor Expression corresponding to an NPU identity.

    If the Tensor Expression matches, an EthosuPart will be created that models the
    matched Tensor Expression. Otherwise, None will be returned.

    Parameters
    ----------
    output_tensor : tvm.te.Tensor
        The tensor to attempt to match with.
    device_config : EthosuDeviceConfig
        Target device configuration

    Returns
    -------
    Union[None, EthosuPart]
        The created EthosuPart if there was a match, otherwise None.
    """
    write = output_tensor
    if write.op.name != "ethosu_write":
        return None
    identity = write.op.input_tensors[0]
    if identity.op.name != "ethosu_identity":
        return None
    read = identity.op.input_tensors[0]
    if read.op.name != "ethosu_read":
        return None

    input_tensors = [
        read.op.input_tensors[0],
    ]
    subgraph = TESubgraph(input_tensors, output_tensor)
    propagators = [
        write.op.attrs["ifm_propagator"],
    ]
    ifm_dtype = input_tensors[0].dtype
    ofm_dtype = output_tensor.dtype

    input_tensors_shape = input_tensors[0].shape
    length = len(input_tensors_shape)
    assert length <= 4, "Input tensor rank must be <= 4 for the identity operator"
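    # Treat the last axis as the channel dimension for tensors of rank 3 or 4,
    # otherwise fall back to a single channel.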
    channels = int(input_tensors_shape[length - 1]) if length >= 3 else 1

    subkernels = len(
        device_config.get_kernel_steps(identity.op.name, 1, 1, ifm_dtype)
    )

    input_layout = output_layout = "NHWC"
    output_quantum = device_config.get_output_quantum(output_layout)

    valid_block_configs = device_config.get_valid_block_configs(
        propagators[0],
        identity.op.attrs,
        output_tensor.shape,
        channels,
        channels,
        output_layout,
        input_layout,
        ifm_dtype,
        ofm_dtype,
        1,
        1,
    )

    return EthosuPart(
        subgraph,
        propagators,
        output_quantum,
        subkernels,
        valid_block_configs,
    )
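
Each matcher above inspects only the output tensor of a candidate chain, so the matchers can simply be tried one after another until one of them returns a Part. The loop below is a minimal sketch of that dispatch, assuming the matchers above are in scope and all take the (output_tensor, device_config) signature; the name match_part and the hard-coded MATCHERS list are illustrative only and are not part of TVM's API.

from typing import Callable, List

# Hypothetical dispatcher: try every matcher against the tensor and return the
# first EthosuPart that matches, or None if no NPU pattern was recognised.
MATCHERS: List[Callable] = [
    match_ethosu_conv2d,
    match_ethosu_pooling,
    match_ethosu_unary_elementwise,
    match_ethosu_identity,
]


def match_part(output_tensor, device_config):
    """Return the first EthosuPart whose matcher recognises output_tensor, else None."""
    for matcher in MATCHERS:
        part = matcher(output_tensor, device_config)
        if part is not None:
            return part
    return None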