Example #1
0
    def _impl(inputs, attr, params, prelude):
        """Convert a TensorArray stack op into Relay calls.

        Falls back to the generic tensor_array_stack when the array shape
        is unknown; otherwise registers static ops for the known shape and
        unwraps the stacked result through tensor_get_data.
        """
        dtype_str = attr["element_dtype"].name
        ta_shape = get_tensor_array_shape(inputs[0], dtype_str, prelude)

        if ta_shape is None:
            # Dynamic case: shape-agnostic stack op.
            generic_stack = prelude.get_global_var("tensor_array_stack", dtype_str)
            return generic_stack(inputs[0])

        # Static case: register ops for the element shape, stack, then
        # register ops for the stacked shape to extract the raw tensor.
        StaticTensorArrayOps(prelude, dtype_str, ta_shape).register()
        static_stack = prelude.get_global_var_static(
            "tensor_array_stack", dtype_str, ta_shape)
        stacked = static_stack(inputs[0])

        # Stacking adds a dynamic leading axis (the array length).
        stacked_shape = (Any(),) + ta_shape
        StaticTensorArrayOps(prelude, dtype_str, stacked_shape).register()
        get_data = prelude.get_global_var_static(
            "tensor_get_data", dtype_str, stacked_shape)
        return get_data(stacked)
Example #2
0
    def run(dtype, shape, value_shape=None, lengths_shape=None):
        """Exercise tensor_array_split on a statically shaped tensor array.

        Writes three tensors into a tensor array, splits a value tensor
        into it according to a lengths tensor, and checks against the
        NumPy reference result.
        """
        mod = tvm.IRModule()
        p = Prelude(mod)
        static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
        static_tensor_array_ops.register()
        if value_shape is not None or lengths_shape is not None:
            static_tensor_array_ops.define_tensor_array_split(value_shape, lengths_shape, False)

        # tensor array
        v1 = relay.var("v1")
        v2 = relay.var("v2")
        # Fix: the name hint was duplicated as "v2" by a copy-paste typo.
        v3 = relay.var("v3")

        # Leading axis is dynamic so elements of different lengths fit.
        adt_shape = [
            relay.Any(),
        ] + shape[1:]
        test_ops = StaticTensorArrayOps(p, dtype, adt_shape)
        test_ops.register()
        tensor_array = test_ops.get_global_var("tensor_array")

        tensor_array1 = tensor_array(relay.const(3))
        write_func = test_ops.get_global_var("tensor_array_write")
        split_ops = StaticTensorArrayOps(p, dtype, shape)
        split_ops.register()
        split_func = split_ops.get_global_var("tensor_array_split")
        tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, test_ops.shape)
        tensor_array1 = write_func(tensor_array1, relay.const(0), tensor(v1))
        tensor_array1 = write_func(tensor_array1, relay.const(1), tensor(v2))
        tensor_array1 = write_func(tensor_array1, relay.const(2), tensor(v3))

        # value tensor
        value = relay.var("value")

        # lengths tensor
        ta_len = relay.var("length")

        # create the split function
        if value_shape is None:
            tensor1 = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        else:
            static_tensor_array_ops = StaticTensorArrayOps(p, dtype, value_shape)
            static_tensor_array_ops.register()
            # NOTE(review): uses test_ops.shape (the Any-leading ADT shape)
            # rather than value_shape — presumably so the constructor matches
            # the registered ADT; confirm against StaticTensorArrayOps.
            tensor1 = p.get_tensor_ctor_static("tensor_constructor", dtype, test_ops.shape)

        tensor_array_split = split_func(tensor_array1, tensor1(value), ta_len)
        mod["main"] = relay.Function([v1, v2, v3, value, ta_len], tensor_array_split)

        # initialize and check against NumPy split of [value; v3]
        v1_data = np.random.uniform(low=0.0, high=8.0, size=[2, 3]).astype(dtype)
        v2_data = np.random.uniform(low=0.0, high=8.0, size=[2, 3]).astype(dtype)
        v3_data = np.random.uniform(low=0.0, high=8.0, size=[2, 3]).astype(dtype)
        value_data = np.random.uniform(low=0.0, high=8.0, size=value_shape or shape).astype(dtype)
        length_data = np.array([2, 2], dtype="int32")
        expected = np.concatenate([value_data, v3_data])
        expected = np.split(expected, indices_or_sections=[2, 4])
        check_tensor_array(
            mod, expected, *(v1_data, v2_data, v3_data, value_data, length_data), dtype=dtype
        )
    def run(dtype, shape):
        """Write two tensors into a static tensor array and check contents."""
        mod = tvm.IRModule()
        p = Prelude(mod)
        StaticTensorArrayOps(p, dtype, shape).register()

        length = 2
        data = []
        for _ in range(length):
            data.append(np.random.uniform(0, 10, size=shape).astype(dtype))

        v0 = relay.var("v0")
        v1 = relay.var("v1")
        make_ta = p.get_global_var_static("tensor_array", dtype, shape)
        write = p.get_global_var_static("tensor_array_write", dtype, shape)
        wrap = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)

        ta = make_ta(relay.const(length))
        ta = write(ta, relay.const(0), wrap(v0))
        ta = write(ta, relay.const(1), wrap(v1))
        mod["main"] = relay.Function([v0, v1], ta)
        # Reading the whole array back should yield exactly what we wrote.
        check_tensor_array(mod, data, *data, dtype=dtype)
Example #4
0
    def run(dtype, shape):
        """Read back each element of a 3-slot static tensor array by index."""
        mod = tvm.IRModule()
        p = Prelude(mod)
        StaticTensorArrayOps(p, dtype, shape).register()

        length = 3
        data = [np.random.uniform(0, 10, size=shape).astype(dtype) for _ in range(length)]

        v0 = relay.var("v0")
        v1 = relay.var("v1")
        v2 = relay.var("v2")
        n = relay.var("n")
        wrap = p.get_var_static("tensor_constructor", dtype, shape)
        make_ta = p.get_var_static("tensor_array", dtype, shape)
        read = p.get_var_static("tensor_array_read", dtype, shape)
        write = p.get_var_static("tensor_array_write", dtype, shape)
        get_data = p.get_var_static("tensor_get_data", dtype, shape)

        ta = make_ta(relay.const(length))
        for i, var in enumerate((v0, v1, v2)):
            ta = write(ta, relay.const(i), wrap(var))

        mod["main"] = relay.Function([v0, v1, v2, n], get_data(read(ta, n)))
        # Reading index i must return the i-th written tensor.
        for idx in range(length):
            check_tensor_array(mod, [data[idx]], *(data + [idx]), dtype=dtype)
    def run(dtype, shape):
        """Concatenate two written tensors via tensor_array_concat."""
        mod = tvm.IRModule()
        p = Prelude(mod)
        StaticTensorArrayOps(p, dtype, shape).register()

        a = relay.var("v1")
        b = relay.var("v2")
        make_ta = p.get_global_var_static("tensor_array", dtype, shape)
        write = p.get_global_var_static("tensor_array_write", dtype, shape)
        concat = p.get_global_var_static("tensor_array_concat", dtype, shape)
        wrap = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)

        ta = make_ta(relay.const(2))
        ta = write(ta, relay.const(0), wrap(a))
        ta = write(ta, relay.const(1), wrap(b))
        mod["main"] = relay.Function([a, b], concat(ta))

        # Inputs have different leading dims; concat joins along axis 0.
        a_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        b_data = np.random.uniform(low=0.0, high=8.0, size=(1, 3)).astype(dtype)
        expected = [np.concatenate((a_data, b_data), axis=0)]
        check_tensor_array(mod, expected, *(a_data, b_data), dtype=dtype)
    def run(dtype, shape, indices_shape=None):
        """Exercise tensor_array_scatter on a statically shaped tensor array.

        Writes three tensors, scatters two value tensors into slots given by
        an indices tensor, and checks against the expected element list.
        """
        mod = tvm.IRModule()
        p = Prelude(mod)
        static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
        static_tensor_array_ops.register()
        if indices_shape is not None:
            static_tensor_array_ops.define_tensor_array_scatter(
                indices_shape, True)

        # tensor array
        v1 = relay.var("v1")
        v2 = relay.var("v2")
        # Fix: the name hint was duplicated as "v2" by a copy-paste typo.
        v3 = relay.var("v3")
        tensor_array = p.get_global_var_static("tensor_array", dtype, shape)
        tensor_array0 = tensor_array(relay.const(3))
        write_func = p.get_global_var_static("tensor_array_write", dtype,
                                             shape)
        scatter_func = p.get_global_var_static("tensor_array_scatter", dtype,
                                               shape)
        tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        tensor_array1 = write_func(tensor_array0, relay.const(0), tensor(v1))
        tensor_array1 = write_func(tensor_array1, relay.const(1), tensor(v2))
        tensor_array1 = write_func(tensor_array1, relay.const(2), tensor(v3))

        # indices array
        index = relay.var("index")

        # values array
        value_0 = relay.var("value_0")
        value_1 = relay.var("value_1")
        values_array = tensor_array(relay.const(2))
        values_array = write_func(values_array, relay.const(0),
                                  tensor(value_0))
        values_array = write_func(values_array, relay.const(1),
                                  tensor(value_1))

        # create the scatter function
        tensor_array_scatter = scatter_func(tensor_array1, index, values_array)
        mod["main"] = relay.Function([v1, v2, v3, index, value_0, value_1],
                                     tensor_array_scatter)

        # initialize and check: slots 0 and 1 are overwritten, slot 2 kept
        v1_data = np.random.uniform(low=0.0, high=8.0,
                                    size=shape).astype(dtype)
        v2_data = np.random.uniform(low=0.0, high=8.0,
                                    size=shape).astype(dtype)
        v3_data = np.random.uniform(low=0.0, high=8.0,
                                    size=shape).astype(dtype)
        index_data = np.array([0, 1], dtype="int32")
        val1_data = np.random.uniform(low=0.0, high=8.0,
                                      size=shape).astype(dtype)
        val2_data = np.random.uniform(low=0.0, high=8.0,
                                      size=shape).astype(dtype)
        expected = [val1_data, val2_data, v3_data]
        check_tensor_array(
            mod,
            expected,
            *(v1_data, v2_data, v3_data, index_data, val1_data, val2_data),
            dtype=dtype,
        )
Example #7
0
 def run(dtype, shape):
     """Check that the static tensor constructor name is registered."""
     mod = tvm.IRModule()
     p = Prelude(mod)
     static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
     static_tensor_array_ops.register()
     tensor_constructor = p.get_name_static("tensor_constructor", dtype, shape)
     # `is not None` is the idiomatic None comparison (PEP 8), not `!= None`.
     assert tensor_constructor is not None
Example #8
0
    def run(dtype, shape):
        """Unstack a tensor into a tensor array and check each slice."""
        mod = tvm.IRModule()
        p = Prelude(mod)
        StaticTensorArrayOps(p, dtype, shape).register()

        unstack = p.get_var_static("tensor_array_unstack", dtype, shape)
        v = relay.var("v")
        mod["main"] = relay.Function([v], unstack(v))
        data = np.random.uniform(low=0, high=10, size=shape).astype(dtype)
        # One expected entry per leading-axis slice of the input.
        expected = list(data)
        check_tensor_array(mod, expected, data, dtype=dtype)
Example #9
0
    def run(dtype, shape):
        """Expand a tensor's dims via tensor_expand_dims and check."""
        x = relay.var("x")
        mod = tvm.IRModule()
        p = Prelude(mod)
        static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
        static_tensor_array_ops.register()

        expand_dims_func = p.get_var_static("tensor_expand_dims", dtype, shape)
        tensor = p.get_var_static("tensor_constructor", dtype, shape)
        mod["main"] = relay.Function([x], expand_dims_func(tensor(x)))
        x_np = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        expected = [np.expand_dims(x_np, axis=0)]
        # Pass dtype explicitly for consistency with the sibling test
        # helpers; the original relied on check_tensor_array's default.
        check_tensor_array(mod, expected, x_np, dtype=dtype)
Example #10
0
    def _impl(inputs, attr, params, prelude):
        """Create a Relay tensor array, statically shaped when possible."""
        dtype_str = attr.get("element_dtype").name
        inferred = _infer_value(inputs[0], params, prelude.mod)
        elem_shape = tuple(inferred.numpy().astype("int32").flatten())

        # Prefer the static constructor whenever any shape info exists.
        if elem_shape or "shape" in attr:
            shape = attr["shape"] if "shape" in attr else elem_shape  # explicit attr wins
            ops = StaticTensorArrayOps(prelude, dtype_str, shape)
            ops.register()
            ctor = ops.get_global_var("tensor_array")
        else:
            ctor = prelude.get_global_var("tensor_array", dtype_str)
        # inputs[1] is the array size.
        return ctor(inputs[1])
Example #11
0
    def _impl(inputs, attr, params, prelude):
        """Convert a TensorArray unstack op into a Relay call."""
        dtype_str = attr["element_dtype"].name
        shape = _infer_type_with_prelude(inputs[0], prelude).shape

        if shape is None:
            # No shape info: shape-agnostic unstack op.
            func = prelude.get_global_var("tensor_array_unstack", dtype_str)
        else:
            # Register static ops for the inferred shape, then use them.
            StaticTensorArrayOps(prelude, dtype_str, shape).register()
            func = prelude.get_global_var_static(
                "tensor_array_unstack", dtype_str, shape)
        return func(inputs[0])
Example #12
0
    def _impl(inputs, attr, params, prelude):
        """Convert a TensorArray read op into Relay calls."""
        dtype_str = attr["element_dtype"].name
        ta_shape = get_tensor_array_shape(inputs[0], dtype_str, prelude)
        # The index arrives as a tensor; take element 0 as a scalar index.
        index = _op.take(inputs[1], tvm.relay.const(0))

        if ta_shape is None:
            read = prelude.get_global_var("tensor_array_read", dtype_str)
            return read(inputs[0], index)

        ops = StaticTensorArrayOps(prelude, dtype_str, ta_shape)
        ops.register()
        read = ops.get_global_var("tensor_array_read")
        get_data = ops.get_global_var("tensor_get_data")
        # Unwrap the ADT element back into a plain tensor.
        return get_data(read(inputs[0], index))
Example #13
0
    def run(dtype, shape):
        """Concatenate two tensors with tensor_concatenate and check."""
        mod = tvm.IRModule()
        p = Prelude(mod)
        StaticTensorArrayOps(p, dtype, shape).register()

        concat = p.get_var_static("tensor_concatenate", dtype, shape)
        wrap = p.get_var_static("tensor_constructor", dtype, shape)
        a = relay.var("v1")
        b = relay.var("v2")
        mod["main"] = relay.Function([a, b], concat(wrap(a), wrap(b)))
        a_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        b_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        expected = [np.concatenate((a_data, b_data))]
        check_tensor_array(mod, expected, *(a_data, b_data), dtype=dtype)
Example #14
0
    def _impl(inputs, attr, params, prelude):
        """Convert a TensorArray write op into Relay calls.

        inputs are [tensor_array, index, value]. Uses generic ops when the
        array shape is unknown; otherwise uses static ops for the known
        shape, reshaping the value to a more static shape when unambiguous.
        """
        dtype_str = attr["element_dtype"].name
        ta = inputs[0]
        ta_shape = get_tensor_array_shape(ta, dtype_str, prelude)
        value_shape = _infer_type_with_prelude(inputs[2], prelude).shape
        rank = len(value_shape)

        if ta_shape is None:
            # Dynamic path: pick the rank-specific generic constructor.
            ctor = prelude.get_tensor_ctor("tensor{}".format(rank), dtype_str)
            wrapped = ctor(inputs[2])
            write = prelude.get_global_var("tensor_array_write", dtype_str)
            return write(ta, inputs[1], wrapped)

        ops = StaticTensorArrayOps(prelude, dtype_str, ta_shape)
        ops.register()
        ctor = ops.get_ctor("tensor_constructor")
        wrapped = ctor(inputs[2])

        # Normalize the array shape: negative integer dims mean "unknown",
        # so replace them with Any().
        norm_shape = [
            Any() if isinstance(dim, (int, tvm.tir.expr.IntImm)) and dim < 0 else dim
            for dim in ta_shape
        ]
        # If the value's inferred type yields a strictly more static shape,
        # reshape the value to it — but only when at most one dim stays
        # dynamic, since reshape supports a single -1 placeholder.
        target_shape = _get_more_static_shape_rank(value_shape, norm_shape)
        if target_shape != norm_shape:
            reshape_dims = [dim if isinstance(dim, int) else -1 for dim in target_shape]
            dynamic_dims = sum(1 for dim in target_shape if not isinstance(dim, int))
            if dynamic_dims <= 1:
                wrapped = ctor(_op.reshape(inputs[2], reshape_dims))
        write = prelude.get_global_var_static("tensor_array_write", dtype_str,
                                              norm_shape)
        return write(ta, inputs[1], wrapped)
Example #15
0
    def run(dtype, shape):
        """Slice a tensor with tensor_take over [lower, upper) and check."""
        mod = tvm.IRModule()
        p = Prelude(mod)
        StaticTensorArrayOps(p, dtype, shape).register()

        take = p.get_var_static("tensor_take", dtype, shape)
        wrap = p.get_var_static("tensor_constructor", dtype, shape)
        v = relay.var("v")
        lower = relay.var("lower")
        upper = relay.var("upper")
        mod["main"] = relay.Function([v, lower, upper], take(wrap(v), lower, upper))
        data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        # Check both an interior slice and a full-range slice.
        for lo, hi in ((2, 5), (0, 9)):
            expected = [np.take(data, range(lo, hi), axis=0)]
            check_tensor_array(mod, expected, *(data, lo, hi), dtype=dtype)
Example #16
0
    def run(dtype, shape):
        """Stack three writes of one tensor via tensor_array_stack."""
        mod = tvm.IRModule()
        p = Prelude(mod)
        StaticTensorArrayOps(p, dtype, shape).register()

        make_ta = p.get_var_static("tensor_array", dtype, shape)
        wrap = p.get_var_static("tensor_constructor", dtype, shape)
        write = p.get_var_static("tensor_array_write", dtype, shape)
        stack = p.get_var_static("tensor_array_stack", dtype, shape)
        v = relay.var("v")
        ta = make_ta(relay.const(3))
        # Write the same value into every slot.
        for i in range(3):
            ta = write(ta, relay.const(i), wrap(v))
        mod["main"] = relay.Function([v], stack(ta))
        data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        expected = [np.stack([data, data, data])]
        check_tensor_array(mod, expected, data, dtype=dtype)
Example #17
0
    def run(dtype, shape):
        """Gather elements of a tensor array by an indices tensor."""
        mod = tvm.IRModule()
        p = Prelude(mod)
        StaticTensorArrayOps(p, dtype, shape).register()

        make_ta = p.get_var_static("tensor_array", dtype, shape)
        wrap = p.get_var_static("tensor_constructor", dtype, shape)
        write = p.get_var_static("tensor_array_write", dtype, shape)
        gather = p.get_var_static("tensor_array_gather", dtype, shape)
        v = relay.var("v")
        indice = relay.var("indice")
        ta = make_ta(relay.const(3))
        # Every slot holds the same value, so any gather stacks copies of it.
        for i in range(3):
            ta = write(ta, relay.const(i), wrap(v))
        mod["main"] = relay.Function([v, indice], gather(ta, indice))
        data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        indice_data = np.array([0, 2], dtype="int32")
        expected = [np.stack([data, data])]
        check_tensor_array(mod, expected, *(data, indice_data), dtype=dtype)