Example #1
    def test_tile_with_dynamic_shape(self, _, shape, dims):
        class Tile(nn.Module):
            def __init__(self, dims):
                super().__init__()
                self.dims = dims

            def forward(self, x):
                return torch.tile(x, self.dims)

        input_specs = [
            InputTensorSpec(
                shape=shape,
                dtype=torch.float32,
                shape_ranges=[
                    (
                        tuple(i if i != -1 else 1 for i in shape),
                        tuple(i if i != -1 else 2 for i in shape),
                        tuple(i if i != -1 else 3 for i in shape),
                    )
                ],
            ),
        ]
        self.run_test_with_dynamic_shape(
            Tile(dims), input_specs, expected_ops={acc_ops.tile}
        )
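
All of the examples on this page follow the same pattern: dimensions set to -1 in shape are dynamic, and each entry in shape_ranges is a (min, opt, max) triple of concrete shapes that the dynamic dimensions may take at runtime. As a minimal sketch (the value of shape below is hypothetical, not taken from the test), the tuple comprehensions in Example #1 expand a parameterized shape like this:

    # Minimal sketch, not part of the test suite: how Example #1 derives the
    # (min, opt, max) triple from a parameterized shape. The value of shape
    # here is a hypothetical example.
    shape = (-1, 2, 3)  # -1 marks the dynamic dimension

    min_shape = tuple(i if i != -1 else 1 for i in shape)  # (1, 2, 3)
    opt_shape = tuple(i if i != -1 else 2 for i in shape)  # (2, 2, 3)
    max_shape = tuple(i if i != -1 else 3 for i in shape)  # (3, 2, 3)

    shape_ranges = [(min_shape, opt_shape, max_shape)]
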
Example #2
    def test_cat_with_dynamic_shape(self):
        class Cat(nn.Module):
            def forward(self, x, y):
                x = x + y
                return torch.cat((x, y), 0)

        input_specs = [
            InputTensorSpec(
                shape=(-1, -1, -1),
                dtype=torch.float32,
                shape_ranges=[((1, 1, 1), (2, 3, 4), (2, 3, 10))],
            ),
            InputTensorSpec(
                shape=(-1, -1, -1),
                dtype=torch.float32,
                shape_ranges=[((1, 1, 1), (2, 3, 4), (2, 3, 10))],
            ),
        ]
        self.run_test_with_dynamic_shape(Cat(), input_specs, expected_ops={acc_ops.cat})
Example #3
    def test_elementwise_op_with_dynamic_shape(
        self, _, x_shape, x_shape_ranges, y_shape, y_shape_ranges, orig_op, expected_op
    ):
        class Op(nn.Module):
            def forward(self, x, y):
                return orig_op(x, y)

        input_specs = [
            InputTensorSpec(
                shape=x_shape,
                dtype=torch.float32,
                shape_ranges=[x_shape_ranges],
            ),
            InputTensorSpec(
                shape=y_shape,
                dtype=torch.float32,
                shape_ranges=[y_shape_ranges],
            ),
        ]

        self.run_test_with_dynamic_shape(Op(), input_specs, expected_ops={expected_op})
Example #4
    def test_silu_with_dynamic_shape(self):
        class Silu(nn.Module):
            def forward(self, x):
                return torch.nn.functional.silu(x)

        input_specs = [
            InputTensorSpec(
                shape=(-1, -1, -1),
                dtype=torch.float32,
                shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
            ),
        ]
        self.run_test_with_dynamic_shape(
            Silu(), input_specs, expected_ops={acc_ops.sigmoid, acc_ops.mul})
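
The expected ops here are acc_ops.sigmoid and acc_ops.mul rather than a dedicated SiLU op, since SiLU is equivalent to x * sigmoid(x). A quick standalone check of that identity, independent of the test harness:

    # Standalone sketch: the identity silu(x) == x * sigmoid(x), which is why
    # the test above expects sigmoid and mul ops instead of a single silu op.
    import torch

    x = torch.randn(2, 3)
    assert torch.allclose(torch.nn.functional.silu(x), x * torch.sigmoid(x))
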
Example #5
    def test_leaky_relu_with_dynamic_shape(self):
        class TestModule(nn.Module):
            def forward(self, x):
                return nn.functional.leaky_relu(x)

        input_specs = [
            InputTensorSpec(
                shape=(-1, -1, -1),
                dtype=torch.float32,
                shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
            ),
        ]
        self.run_test_with_dynamic_shape(
            TestModule(), input_specs, expected_ops={acc_ops.leaky_relu}
        )
Example #6
    def test_permute_with_dynamic_shape(self):
        class Permute(nn.Module):
            def forward(self, x):
                return x.permute(1, 2, 0)

        input_specs = [
            InputTensorSpec(
                shape=(-1, -1, -1),
                dtype=torch.float32,
                shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
            ),
        ]
        self.run_test_with_dynamic_shape(Permute(),
                                         input_specs,
                                         expected_ops={acc_ops.permute})
Example #7
    def test_squeeze_with_dynamic_shape(self):
        class Squeeze(nn.Module):
            def forward(self, x):
                return x.squeeze(0)

        input_specs = [
            InputTensorSpec(
                shape=(1, -1, 2),
                dtype=torch.float32,
                shape_ranges=[((1, 1, 2), (1, 2, 2), (1, 3, 2))],
            ),
        ]
        self.run_test_with_dynamic_shape(Squeeze(),
                                         input_specs,
                                         expected_ops={acc_ops.squeeze})
Example #8
    def test_tanh_with_dynamic_shape(self):
        class Tanh(nn.Module):
            def forward(self, x):
                return torch.tanh(x)

        input_specs = [
            InputTensorSpec(
                shape=(-1, -1, -1),
                dtype=torch.float32,
                shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
            ),
        ]
        self.run_test_with_dynamic_shape(Tanh(),
                                         input_specs,
                                         expected_ops={acc_ops.tanh})
Example #9
    def test_dequantize_with_dynamic_shape(self):
        class TestModule(nn.Module):
            def forward(self, x):
                x = torch.quantize_per_tensor(x, 1, 0, torch.quint8)
                return x.dequantize()

        input_specs = [
            InputTensorSpec(
                shape=(-1, -1, -1),
                dtype=torch.float32,
                shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
            ),
        ]
        self.run_test_with_dynamic_shape(
            TestModule(), input_specs, expected_ops={acc_ops.dequantize}
        )
Example #10
    def test_size_dynamic_shape(self):
        class Size(nn.Module):
            def forward(self, x):
                bs = x.size(0)
                return x.view(bs, -1)

        input_specs = [
            InputTensorSpec(
                shape=(-1, 12, 32),
                dtype=torch.float32,
                shape_ranges=[((1, 12, 32), (3, 12, 32), (100, 12, 32))],
            ),
        ]
        self.run_test_with_dynamic_shape(Size(),
                                         input_specs,
                                         expected_ops={acc_ops.size})
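
This works with a dynamic batch dimension because x.size(0) resolves to the actual batch size at runtime, so view(bs, -1) flattens the remaining dimensions whatever the batch size is. A plain-PyTorch sketch of the same pattern:

    # Plain-PyTorch sketch of the pattern traced in Example #10.
    import torch

    x = torch.randn(5, 12, 32)
    bs = x.size(0)      # 5; resolved at runtime for a dynamic batch dimension
    y = x.view(bs, -1)  # flattens the remaining dims -> shape (5, 384)
    assert y.shape == (5, 12 * 32)
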
Example #11
    def test_reshape_with_dynamic_shape(self, target_shape):
        class TestModule(torch.nn.Module):
            def __init__(self, target_shape):
                super().__init__()
                self.target_shape = target_shape

            def forward(self, x):
                return torch.reshape(x, self.target_shape)

        input_specs = [
            InputTensorSpec(
                shape=(-1, -1, -1),
                dtype=torch.float32,
                shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
            ),
        ]
        self.run_test_with_dynamic_shape(TestModule(target_shape),
                                         input_specs,
                                         expected_ops={acc_ops.reshape})
Example #12
    def test_unsqueeze_with_dynamic_shape(self, _, dim):
        class Unsqueeze(nn.Module):
            def __init__(self, dim):
                super().__init__()
                self.dim = dim

            def forward(self, x):
                return torch.unsqueeze(x, self.dim)

        input_specs = [
            InputTensorSpec(
                shape=(-1, 2, 3),
                dtype=torch.float32,
                shape_ranges=[((1, 2, 3), (2, 2, 3), (3, 2, 3))],
            ),
        ]
        self.run_test_with_dynamic_shape(Unsqueeze(dim),
                                         input_specs,
                                         expected_ops={acc_ops.unsqueeze})
Example #13
    def test_max_pool2d_with_dynamic_shape(self):
        class TestModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.max_pool = torch.nn.MaxPool2d(1, 1)

            def forward(self, x):
                return self.max_pool(x)

        input_specs = [
            InputTensorSpec(
                shape=(-1, -1, -1, -1),
                dtype=torch.float32,
                shape_ranges=[((1, 1, 1, 1), (1, 2, 4, 4), (2, 4, 4, 4))],
            ),
        ]
        self.run_test_with_dynamic_shape(TestModule(),
                                         input_specs,
                                         expected_ops={acc_ops.max_pool2d})
Example #14
    def test_adaptive_avgpool_with_dynamic_shape(self):
        class TestModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.pool = torch.nn.AdaptiveAvgPool2d((64, 64))

            def forward(self, x):
                return self.pool(x)

        input_specs = [
            InputTensorSpec(
                shape=(-1, -1, 256, 256),
                dtype=torch.float32,
                shape_ranges=[((1, 1, 256, 256), (3, 3, 256, 256), (5, 5, 256, 256))],
            ),
        ]
        self.run_test_with_dynamic_shape(
            TestModule(), input_specs, expected_ops={acc_ops.adaptive_avg_pool2d}
        )
Example #15
    def test_conv2d_with_dynamic_shape(self):
        class TestModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.conv = torch.nn.Conv2d(3, 6, 1)

            def forward(self, x):
                return self.conv(x)

        input_specs = [
            InputTensorSpec(
                shape=(-1, 3, -1, -1),
                dtype=torch.float32,
                shape_ranges=[((1, 3, 1, 1), (1, 3, 4, 4), (32, 3, 128, 128))],
            ),
        ]
        self.run_test_with_dynamic_shape(TestModule(),
                                         input_specs,
                                         expected_ops={acc_ops.conv2d})
Example #16
    def test_batchnorm_with_dynamic_shape(self):
        class TestModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.bn = torch.nn.BatchNorm2d(3)

            def forward(self, x):
                return self.bn(x)

        input_specs = [
            InputTensorSpec(
                shape=(-1, 3, -1, -1),
                dtype=torch.float32,
                shape_ranges=[((1, 3, 1, 1), (1, 3, 5, 5), (2, 3, 10, 10))],
            ),
        ]

        self.run_test_with_dynamic_shape(TestModule(),
                                         input_specs,
                                         expected_ops={acc_ops.batch_norm})
Example #17
    def test_layer_norm_with_dynamic_shape(self, _, normalized_shape, input_shape):
        class LayerNorm(nn.Module):
            def __init__(self, normalized_shape):
                super().__init__()
                self.mod = nn.LayerNorm(normalized_shape, eps=1e-02)

            def forward(self, x):
                return self.mod(x)

        input_specs = [
            InputTensorSpec(
                shape=(-1,) + input_shape,
                dtype=torch.float32,
                shape_ranges=[
                    ((1,) + input_shape, (4,) + input_shape, (10,) + input_shape)
                ],
            ),
        ]
        self.run_test_with_dynamic_shape(
            LayerNorm(normalized_shape), input_specs, expected_ops={acc_ops.layer_norm}
        )
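
In this parameterized test, normalized_shape has to match the trailing dimensions of input_shape, with the prepended dimension acting as the dynamic batch. A minimal sketch with hypothetical parameter values:

    # Minimal sketch with hypothetical normalized_shape / input_shape values;
    # LayerNorm normalizes over the trailing dims given by normalized_shape.
    import torch
    import torch.nn as nn

    normalized_shape = (10, 10)
    input_shape = (2, 10, 10)

    mod = nn.LayerNorm(normalized_shape, eps=1e-02)
    x = torch.randn((4,) + input_shape)  # batch size 4, i.e. the opt shape above
    assert mod(x).shape == x.shape
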
Example #18
    def test_linear_with_dynamic_shape(self):
        class TestModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = torch.nn.Linear(512, 256)

            def forward(self, x):
                return self.linear(x)

        input_specs = [
            InputTensorSpec(
                shape=(-1, 3, 512),
                dtype=torch.float32,
                shape_ranges=[((1, 3, 512), (3, 3, 512), (4, 3, 512))],
            ),
        ]
        self.run_test_with_dynamic_shape(
            TestModule(),
            input_specs,
            expected_ops={acc_ops.linear},
        )
Example #19
    def test_flatten_with_dynamic_shape(self, _, start_dim, end_dim):
        class Flatten(nn.Module):
            def __init__(self, start, end):
                super().__init__()
                self.start = start
                self.end = end

            def forward(self, x):
                return torch.flatten(x, self.start, self.end)

        input_specs = [
            InputTensorSpec(
                shape=(-1, -1, -1, -1, -1),
                dtype=torch.float32,
                shape_ranges=[((1, 1, 1, 1, 1), (1, 2, 3, 2, 1), (3, 3, 3, 3, 3))],
            ),
        ]
        self.run_test_with_dynamic_shape(
            Flatten(start_dim, end_dim),
            input_specs,
            expected_ops={acc_ops.flatten},
        )
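
The start_dim and end_dim parameters control which contiguous run of dimensions torch.flatten collapses into one. A plain-PyTorch sketch with hypothetical values:

    # Plain-PyTorch sketch of torch.flatten with hypothetical start/end dims.
    import torch

    x = torch.randn(2, 3, 4, 5, 6)
    y = torch.flatten(x, 1, 3)  # collapses dims 1..3 -> shape (2, 60, 6)
    assert y.shape == (2, 3 * 4 * 5, 6)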