Example #1
import tvm
from tvm import te

def within_index(b, e, s, i):
    """Return a boolean value that indicates if i is within the given index.

    Parameters
    ----------
    b : Expr
      beginning of the index

    e : Expr
      end of the index

    s : Expr
      strides of index

    i : Expr
      array position

    Returns
    -------
    selected: Expr
        bool expression that is True if the array position would be selected
        by the index and False otherwise
    """
    # Position lies before the slice start (direction depends on the stride sign)
    bc = tvm.tir.Select(s < 0, i <= e, i < b)
    # Position lies past the slice end
    ec = tvm.tir.Select(s < 0, i > b, i >= e)
    # Offset from the slice start must be an exact multiple of the stride
    ss = te.if_then_else(s < 0, ((i - e) + (e % te.abs(s)) + 1) % te.abs(s),
                         (i - b) % s)
    return tvm.tir.Select(tvm.tir.Or(bc, ec), tvm.tir.const(False),
                          ss.equal(0))
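A quick way to sanity-check this predicate is to fold constant cases with TVM's arithmetic analyzer; the slice below ([2:8:3] over a length-10 axis) is illustrative:

ana = tvm.arith.Analyzer()
b, e, s = tvm.tir.const(2), tvm.tir.const(8), tvm.tir.const(3)
for i in range(10):
    # each call should fold to a constant boolean
    print(i, ana.simplify(within_index(b, e, s, tvm.tir.const(i))))
# positions 2 and 5 come out True, matching Python's range(2, 8, 3)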
Example #2
from tvm import te, tir

def __reflect(index, size, corner_start):
    # Mirror an out-of-range index back into [corner_start, corner_start + size],
    # as in "reflect" padding: an even fold count keeps the offset, an odd one flips it.
    index_align_corner = te.abs(corner_start - index)
    size_times = te.truncdiv(index_align_corner.astype("int32"), size).astype("int32")
    t = tir.Mod(size_times, 2)
    extra = index_align_corner - size_times * size
    return tir.if_then_else(
        tir.EQ(t, 0), extra + corner_start, size - extra + corner_start
    )
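To see what the reflection rule computes, here is a plain-Python analogue of the same arithmetic (illustrative only; the actual helper builds TIR expressions):

def reflect_py(index, size, corner_start):
    index_align_corner = abs(corner_start - index)
    size_times = index_align_corner // size
    extra = index_align_corner - size_times * size
    # even number of folds keeps the offset, odd flips it
    return extra + corner_start if size_times % 2 == 0 else size - extra + corner_start

print([reflect_py(i, 4, 0) for i in range(-4, 9)])
# [4, 3, 2, 1, 0, 1, 2, 3, 4, 3, 2, 1, 0]: indices mirror about both borders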
Example #3
from tvm import te

def abs(x):
    """Take the absolute value of the input x, element-wise.

    Parameters
    ----------
    x : tvm.te.Tensor
        Input argument.

    Returns
    -------
    y : tvm.te.Tensor
        The result.
    """
    return te.compute(x.shape, lambda *i: te.abs(x(*i)))
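A minimal end-to-end run of this operator could look as follows (assuming a TVM build with the te schedule API; names and sizes are illustrative):

import numpy as np
import tvm
from tvm import te

A = te.placeholder((4,), name="A")
B = abs(A)  # the element-wise abs defined above
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], target="llvm")
a = tvm.nd.array(np.array([-1.0, 2.0, -3.0, 4.0], dtype="float32"))
b = tvm.nd.array(np.zeros(4, dtype="float32"))
f(a, b)
print(b.numpy())  # [1. 2. 3. 4.]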
Example #4
import tvm
from tvm import te

def make_idx(b, e, s, z, i):
    """Return the array position in the selection that corresponds to an
    array position in the full array.

    The returned value is only meaningful if within_index() returns True
    for the same set of parameters.

    Parameters
    ----------
    b : Expr
      beginning of the index

    e : Expr
      end of the index

    s : Expr
      strides of index

    z : Expr
      size of the indexed dimension

    i : Expr
      array position

    Returns
    -------
    position: Expr
        int expression that corresponds to an array position in the selection.
    """
    bc = tvm.tir.Select(s < 0, i <= e, i < b)
    ec = tvm.tir.Select(s < 0, i > b, i >= e)

    # Clamp to array size
    b = tvm.tir.Select(z < b, z - 1, b)

    ss = tvm.tir.if_then_else(s < 0,
                              (b - i) // te.abs(s),
                              (i - b) // s)
    # 88 is an arbitrary filler value: the result is only meaningful when
    # within_index() returns True for the same parameters (see the docstring)
    return tvm.tir.if_then_else(tvm.tir.Or(bc, ec), 88, ss)
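As with within_index, constant cases can be folded with the arithmetic analyzer; under the same illustrative slice [2:8:3], the selected positions map to consecutive selection indices:

ana = tvm.arith.Analyzer()
b, e, s, z = (tvm.tir.const(v) for v in (2, 8, 3, 10))
for i in (2, 5):  # the positions selected by [2:8:3]
    print(i, ana.simplify(make_idx(b, e, s, z, tvm.tir.const(i))))
# expected mapping: 2 -> 0, 5 -> 1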
Example #5
import numpy as np
import tvm
from tvm import te, topi

# check_grad is a helper defined elsewhere in TVM's autodiff test suite;
# see the sketch after this test for what it roughly does.
def test_basic_operation():
    np.random.seed(0)
    shape = (10, 10)
    x = te.var("x", dtype='float32')
    k = te.reduce_axis((0, 10), name="k")
    l = te.reduce_axis((0, 10), name="l")
    A0 = te.placeholder(shape, name='A0')
    A1 = te.placeholder(shape, name='A1')
    zeros = np.zeros(shape)

    B = te.compute(shape, lambda i, j: A0[i, j], name='B')
    check_grad(B, [A0])

    B = te.compute(shape, lambda i, j: A0[i, j] + A1[i, j], name='B')
    check_grad(B, [A0, A1])

    B = te.compute(shape, lambda i, j: A0[i, j] + A0[j, i], name='B')
    check_grad(B, A0)

    B = te.compute(shape, lambda i, j: te.floor(A0[i, j]), name='B')
    check_grad(B, A0, desired_grads=[zeros])

    B = te.compute(shape, lambda i, j: te.ceil(A0[i, j]), name='B')
    check_grad(B, A0, desired_grads=[zeros])

    B = te.compute(shape, lambda i, j: te.trunc(A0[i, j]), name='B')
    check_grad(B, A0, desired_grads=[zeros])

    B = te.compute(shape, lambda i, j: te.round(A0[i, j]), name='B')
    check_grad(B, A0, desired_grads=[zeros])

    B = te.compute(shape, lambda i, j: A0[i, j] + te.exp(A0[j, i]), name='B')
    check_grad(B, A0)

    B = te.compute(
        shape,
        lambda i, j: te.log(0.1 + te.abs(A0[i, j] + te.exp(A0[j, i]))),
        name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.sigmoid(A0[i, j] * A0[i, j] * A0[j, i]),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.tanh(A0[i, j] * A0[i, j] * A0[j, i]),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.sqrt(A0[i, j] * A0[i, j] * A0[j, i]),
                   name='B')
    check_grad(B, A0, data_range=(0.1, 10))

    B = te.compute(shape,
                   lambda i, j: te.power(te.abs(A0[i, j]), A0[j, i]),
                   name='B')
    check_grad(B, A0, data_range=(-4, 4))

    B = te.compute(shape, lambda i, j: A0[i, j] * A0[j, i], name='B')
    check_grad(B, A0)

    B = te.compute((10, ),
                   lambda i: te.sum(A0[i, k] * A0[k, i], axis=k),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.sum(A0[i, k] * A0[k, i] + 5, axis=k),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: te.max(A0[i, k] * A0[k, j] + 5, axis=k),
                   name='B')
    check_grad(B, A0)

    B = te.compute(shape,
                   lambda i, j: A0[i, j] * (A1[j, i] + A0[j, i]),
                   name='B')
    check_grad(B, [A0, A1])

    B = te.compute(shape,
                   lambda i, j: te.sum(
                       A0[k, k] - A0[te.min(j + k, 9), j] * A0[i, k], axis=k),
                   name='B')
    check_grad(B, A0)

    def fcombine(x, y):
        return x * y

    def fidentity(t0):
        return tvm.tir.const(1, t0)

    prod = te.comm_reducer(fcombine, fidentity, name='prod')
    B = te.compute((10, 10),
                   lambda i, j: prod(A0[i, k] + A0[k, i], axis=k),
                   name='B')
    check_grad(B, A0)

    X = te.placeholder((10, ), name='X')
    A = te.compute((10, ), lambda i: X[i] + X[9 - i])
    B = te.compute((10, ), lambda i: X[i] * X[9 - i])
    Y = topi.tensordot(A, B, 1)
    check_grad(Y, X)
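Very roughly, check_grad differentiates the output with te.gradient, builds the result, and compares against numeric gradients. A heavily simplified sketch (names and details are illustrative, not the test suite's exact code):

def check_grad_sketch(out, inputs):
    inputs = inputs if isinstance(inputs, list) else [inputs]
    grads = te.gradient(out, inputs)  # reverse-mode autodiff over the TE graph
    sched = te.create_schedule([out.op] + [g.op for g in grads])
    f = tvm.build(sched, inputs + [out] + grads, target="llvm")
    # ...then run f on random inputs and compare grads against finite differences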
Example #6
import tvm
from tvm import te
from tvm.topi.nn.pad import pad
from tvm.topi.utils import get_const_tuple  # tvm.topi.util in older TVM releases

def correlation_nchw(data1, data2, kernel_size, max_displacement, stride1, stride2, padding,
                     is_multiply):
    """Correlation operator in NCHW layout.

    Parameters
    ----------
    data1 : tvm.te.Tensor
        4-D with shape [batch, channel, height, width]

    data2 : tvm.te.Tensor
        4-D with shape [batch, channel, height, width]

    kernel_size: int
        Kernel size for correlation, must be an odd number

    max_displacement: int
        Max displacement of Correlation

    stride1: int
        Stride for data1

    stride2: int
        Stride for data2 within the neighborhood centered around data1

    padding : int or a list/tuple of 2 or 4 ints
        Padding size, or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints

    is_multiply: bool
        operation type is either multiplication or subtraction

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # pylint: disable=unnecessary-lambda, invalid-name
    data_shape = get_const_tuple(data1.shape)
    assert get_const_tuple(data2.shape) == data_shape, "data1 and data2 should have the same shape"
    assert kernel_size > 0 and kernel_size % 2, "kernel_size should be a positive odd number"
    if isinstance(padding, (tuple, list)):
        if len(padding) == 2:
            pad_before_h = pad_after_h = padding[0]
            pad_before_w = pad_after_w = padding[1]
        elif len(padding) == 4:
            pad_before_h, pad_before_w, pad_after_h, pad_after_w = padding
        else:
            raise ValueError("invalid padding")
    elif isinstance(padding, int):
        pad_before_h = pad_after_h = pad_before_w = pad_after_w = padding
    else:
        raise ValueError("invalid padding")
    pad_before = [0, 0, pad_before_h, pad_before_w]
    pad_after = [0, 0, pad_after_h, pad_after_w]
    padded_data1 = pad(data1, pad_before, pad_after)
    padded_data2 = pad(data2, pad_before, pad_after)

    batch, channel, height, width = data_shape

    kernel_radius = (kernel_size - 1) // 2
    border_size = max_displacement + kernel_radius
    displacement_radius = max_displacement // stride2
    displacement_size = 2 * displacement_radius + 1

    padded_width = width + pad_before_w + pad_after_w
    padded_height = height + pad_before_h + pad_after_h
    out_channel = displacement_size * displacement_size
    out_height = (padded_height - 2 * border_size + stride1 - 1) // stride1
    out_width = (padded_width - 2 * border_size + stride1 - 1) // stride1

    rc = te.reduce_axis((0, channel), name='rc')
    ry = te.reduce_axis((0, kernel_size), name='ry')
    rx = te.reduce_axis((0, kernel_size), name='rx')

    if is_multiply:
        corr_func = lambda x, y: x * y
    else:
        corr_func = lambda x, y: te.abs(x - y)

    def _compute_correlation(n, q, i, j):
        # location in data1
        y1 = i * stride1 + max_displacement
        x1 = j * stride1 + max_displacement
        # location in data2
        y2 = y1 + (te.indexdiv(q, displacement_size) - displacement_radius) * stride2
        x2 = x1 + (te.indexmod(q, displacement_size) - displacement_radius) * stride2
        return te.sum(corr_func(padded_data1[n, rc, y1 + ry, x1 + rx],
                                padded_data2[n, rc, y2 + ry, x2 + rx]), axis=[rc, ry, rx])

    correlation = te.compute((batch, out_channel, out_height, out_width), lambda n, q, i, j:
                             _compute_correlation(n, q, i, j), tag="correlation_nchw")
    return correlation / (kernel_size * kernel_size * channel)
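A shape-level usage sketch (sizes are illustrative):

data1 = te.placeholder((1, 3, 32, 32), name="data1")
data2 = te.placeholder((1, 3, 32, 32), name="data2")
out = correlation_nchw(data1, data2, kernel_size=1, max_displacement=4,
                       stride1=1, stride2=1, padding=4, is_multiply=True)
print(out.shape)  # [1, 81, 32, 32]; out_channel = (2 * (4 // 1) + 1) ** 2 = 81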