Example #1
def Gather(params_shape,
           indices_shape,
           params_dtype,
           indices_dtype,
           axis,
           kernel_name,
           cce_path="./",
           target=utils.CCE):
    """Gather data by indices"""
    utils.check_shape(params_shape, length=2)
    utils.check_shape(indices_shape, length=1)
    utils.ops_dtype_check(params_dtype, utils.DtypeForDavinci.ALL_TYPES)
    utils.ops_dtype_check(indices_dtype, utils.DtypeForDavinci.INT32)
    utils.check_equal("axis", "zero", axis, 0)

    # construct compute
    o_shape = (indices_shape[0], params_shape[1])
    xx = akg.tvm.placeholder(params_shape, dtype=params_dtype, name="X")
    yy = akg.tvm.placeholder(indices_shape, dtype=indices_dtype, name="Y")
    res = akg.tvm.extern(o_shape, [xx, yy],
                         lambda ins, outs: kernel_ir(outs[0], ins[0], ins[1]),
                         name="res",
                         dtype=params_dtype)
    s = akg.tvm.create_schedule(res.op)

    # create cce
    attrs = {"enable_multicore": False}
    with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
        mod = akg.build(s, [xx, yy, res], "cce", name=kernel_name, attrs=attrs)

    source_code = mod.imported_modules[0].get_source()
    create_code(kernel_name, cce_path, source_code)

    return mod
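
A minimal invocation sketch (hypothetical shapes and kernel name; assumes `Gather` and its helpers `kernel_ir`, `create_code`, and `debug_mode` are importable from the module above, and that a Davinci/CCE toolchain backs `akg.build`):

mod = Gather(params_shape=(1024, 32),
             indices_shape=(128,),
             params_dtype="float16",
             indices_dtype="int32",
             axis=0,                      # only axis 0 passes the check above
             kernel_name="gather_demo")
# mod is the built CCE module; create_code also dumps the kernel source under cce_path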
Example #2
def resize_nearest(input, output_shape):
    """
    Resize images using Nearest-neighbor interpolation.
    
    Args:
        input (tvm.tensor.Tensor): 4-D tensor of type float16 or float32 `("NHWC")`.
        output_shape (Union[tuple, list]): New size of the image, 4 integers `("NHWC")`.
    
    Note:
        The batch_num("N") of input and output must be equal, and so must the channel_num("C").
    
    Returns:
        tvm.tensor.Tensor, has the same type as `input`.
    """
    input_shape = get_shape(input)
    utils.check_shape(input, 4, "input")
    utils.check_shape(output_shape, 4, "output_shape")
    utils.ops_dtype_check(input.dtype, utils.DtypeForDavinci.ALL_FLOAT)
    utils.check_equal("input batchsize", "output batchsize", input_shape[0], output_shape[0])
    utils.check_equal("input channel num", "output channel num", input_shape[3], output_shape[3])

    res = process_integer_scale(input, output_shape)
    if res is None:
        res = process_non_integer_scale(input, output_shape)
    return res
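
A minimal usage sketch (illustrative shapes; assumes `akg` is importable and `resize_nearest` with its helpers is in scope):

images = akg.tvm.placeholder((1, 16, 16, 3), dtype="float16", name="images")
# 2x upscale in H and W; N and C must stay unchanged per the checks above
out = resize_nearest(images, (1, 32, 32, 3))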
Example #3
def check_inputs(data, block_shape, crops):
    """check input shape and types"""
    utils.ops_dtype_check(data.dtype, utils.DtypeForDavinci.ALL_TYPES)
    utils.check_shape(data, tensor_name="data")
    utils.check_shape(block_shape, tensor_name="block_shape")
    if not isinstance(crops, (list, tuple)):
        raise RuntimeError("crops must be a 2D list or tuple.")
    for cs in crops:
        if not isinstance(cs, (list, tuple)) or len(cs) != 2:
            raise RuntimeError(
                "crops must be a 2D list or tuple and the 2nd dim has length 2."
            )
        if cs[0] < 0 or cs[1] < 0:
            raise RuntimeError(
                "all values in crops must be greater than or equal to zero.")
    utils.check_equal("length of block_shape", "length of crops",
                      len(block_shape), len(crops))
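
A quick illustration of what passes and what raises (hypothetical shapes; assumes `akg` is importable and `check_inputs` is in scope):

data = akg.tvm.placeholder((4, 2, 2, 1), dtype="float32", name="data")
check_inputs(data, block_shape=(2, 2), crops=[[0, 0], [0, 0]])  # passes
# check_inputs(data, (2, 2), [[0, -1], [0, 0]])  # raises: negative crop value
# check_inputs(data, (2, 2), [[0, 0]])           # raises: length mismatch with block_shape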
Example #4
def encode_onehot_classes(groundtruth_class, anchor_sample, class_num):
    """
    One-hot encode the anchor_class.
    
    This op uses `anchor_sample` to dereference `groundtruth_class`,
    and then encodes the one-hot result for each anchor.
    
    Args:
        groundtruth_class (tvm.tensor.Tensor): The `class_id` of each groundtruth, shape `(batchsize, num_groundtruths)`.
        anchor_sample (tvm.tensor.Tensor): The `groundtruth_id` of each anchor, shape `(batchsize, num_anchors)`.
        class_num (int): Class number.
    
    Returns:
        akg.tvm.Tensor, shape `(batchsize, num_anchors, class_num)`, together with a dict of dim attributes for scheduling.
    """
    utils.check_shape(groundtruth_class, 2, "groundtruth_class")
    utils.check_shape(anchor_sample, 2, "anchor_sample")
    utils.check_equal("batchsize in groundtruth_class",
                      "batchsize in anchor_sample",
                      groundtruth_class.shape[0].value,
                      anchor_sample.shape[0].value)
    utils.ops_dtype_check([groundtruth_class.dtype, anchor_sample.dtype],
                          utils.DtypeForDavinci.INT32)
    utils.check_greater("class_num", "zero", class_num, 0)

    dim_info, _ = encode_one_hot_set_dim_func(groundtruth_class, anchor_sample,
                                              class_num)
    attrs = {"dim": dim_info}

    onehot_res, _ = OneHot(groundtruth_class,
                           class_num,
                           groundtruth_class.dtype,
                           on_value=1,
                           off_value=0,
                           axis=-1)

    an_shape = get_shape(anchor_sample)
    out_shape = an_shape + [class_num]
    res = akg.tvm.compute(
        out_shape,
        lambda b, a, c: onehot_res[b, anchor_sample[b, a], c],
        name="encode_result")
    return res, attrs
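
A minimal usage sketch (illustrative batch/anchor/class sizes; assumes `akg` is importable and `encode_onehot_classes` with its helpers is in scope):

gt_class = akg.tvm.placeholder((8, 20), dtype="int32", name="groundtruth_class")
anchor_sample = akg.tvm.placeholder((8, 100), dtype="int32", name="anchor_sample")
res, attrs = encode_onehot_classes(gt_class, anchor_sample, class_num=90)
# res has shape (8, 100, 90): one one-hot row of length class_num per anchor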
Example #5
def confusion_matrix(actual, predict, num_class, target=utils.CCE):
    """
    Computes the confusion matrix from predictions and labels.
    
    The matrix columns represent the prediction labels and the rows represent the real labels.
    The confusion matrix is always a 2-D array of shape `(num_class, num_class)`.
    Both `predict` and `actual` must be 1-D arrays of the same shape in order for this function to work.
    
    Args:
        actual (tvm.tensor.Tensor): 1-D tensor of type int32.
        predict (tvm.tensor.Tensor): 1-D tensor of type int32.
        num_class (int): The number of valid labels for a given classification task.
    
    Returns:
        tvm.tensor.Tensor, with shape `(num_class, num_class)` representing the confusion matrix.
    """
    utils.check_shape(actual, length=1, tensor_name="actual")
    utils.check_shape(predict, length=1, tensor_name="predict")
    utils.check_equal("length of actual", "length of predict",
                      actual.shape[0].value, predict.shape[0].value)
    utils.ops_dtype_check([actual.dtype, predict.dtype],
                          utils.DtypeForDavinci.INT32)

    N = num_class
    K = actual.shape[0].value
    k = akg.tvm.reduce_axis((0, K), "k")

    # indicator tensor: tmp[k, i, j] is 1 iff sample k has real label i and predicted label j
    tmp = akg.tvm.compute(
        (K, N, N),
        lambda k, i, j: akg.tvm.expr.Select(
            akg.tvm.all(i == actual[k], j == predict[k]), 1, 0),
        name="tmp")
    # reduce over the sample axis to accumulate counts into the (N, N) matrix
    output = akg.tvm.compute((N, N),
                             lambda i, j: akg.tvm.sum(tmp[k, i, j], axis=k),
                             name="output")

    return output
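
A minimal usage sketch (illustrative sizes; assumes `akg` is importable and `confusion_matrix` is in scope):

actual = akg.tvm.placeholder((64,), dtype="int32", name="actual")
predict = akg.tvm.placeholder((64,), dtype="int32", name="predict")
cm = confusion_matrix(actual, predict, num_class=10)
# cm[i, j] counts samples whose real label is i and predicted label is j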
Example #6
def slice(data, begin, size):
    """
    Extracts a slice from a tensor.

    Args:
        data (tvm.tensor.Tensor): Input data of type float16, float32, int32.
        begin (Union[tuple, list]): Specifies the start index of a slice.
        size (Union[tuple, list]): Specifies the size of a slice.
    
    Returns:
        tvm.tensor.Tensor, has the same type as the input tensor `data`, together with a dict of dim attributes for scheduling.
    """

    shape = get_shape(data)
    utils.check_shape(shape)
    utils.check_equal("len(shape)", "len(begin)", len(shape), len(begin))
    utils.check_equal("len(shape)", "len(size)", len(shape), len(size))
    utils.ops_dtype_check(
        data.dtype,
        [utils.DtypeForDavinci.ALL_FLOAT, utils.DtypeForDavinci.INT32])

    dim_info, _ = slice_set_dim_func(data, begin, size)
    attrs = {"dim": dim_info}

    out_shape = [
        size[i] if size[i] > 0 else shape[i] - begin[i]
        for i in range(len(shape))
    ]

    def slice_index(*inputs):
        return [begin[i] + inputs[i] for i in range(len(inputs))]

    res = akg.tvm.compute(out_shape,
                          lambda *i: data(*slice_index(*i)),
                          name='res')

    return res, attrs
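
A minimal usage sketch (illustrative shape and slice parameters; assumes `akg` is importable and `slice` with its helpers is in scope):

data = akg.tvm.placeholder((8, 16), dtype="float32", name="data")
res, attrs = slice(data, begin=(2, 0), size=(4, -1))
# res has shape (4, 16): a non-positive size extends the slice to the end of that axis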