def test_cat_with_dynamic_shape(self):
    class Cat(nn.Module):
        def forward(self, x, y):
            x = x + y
            return torch.cat((x, y), 0)

    input_specs = [
        InputTensorSpec(
            shape=(-1, -1, -1),
            dtype=torch.float32,
            shape_ranges=[((1, 1, 1), (2, 3, 4), (2, 3, 10))],
        ),
        InputTensorSpec(
            shape=(-1, -1, -1),
            dtype=torch.float32,
            shape_ranges=[((1, 1, 1), (2, 3, 4), (2, 3, 10))],
        ),
    ]
    self.run_test_with_dynamic_shape(Cat(), input_specs, expected_ops={acc_ops.cat})

def test_softmax_with_dynamic_shape(self):
    class Softmax(nn.Module):
        def forward(self, x):
            return nn.functional.softmax(x)

    input_specs = [
        InputTensorSpec(
            shape=(-1, -1, -1),
            dtype=torch.float32,
            shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
        ),
    ]
    self.run_test_with_dynamic_shape(Softmax(), input_specs, expected_ops={acc_ops.softmax})

def test_squeeze_with_dynamic_shape(self):
    class Squeeze(nn.Module):
        def forward(self, x):
            return x.squeeze(0)

    input_specs = [
        InputTensorSpec(
            shape=(1, -1, 2),
            dtype=torch.float32,
            shape_ranges=[((1, 1, 2), (1, 2, 2), (1, 3, 2))],
        ),
    ]
    self.run_test_with_dynamic_shape(Squeeze(), input_specs, expected_ops={acc_ops.squeeze})

def test_unsqueeze_with_dynamic_shape(self):
    class Unsqueeze(nn.Module):
        def forward(self, x):
            return torch.unsqueeze(x, 1)

    input_specs = [
        InputTensorSpec(
            shape=(-1, 2, 3),
            dtype=torch.float32,
            shape_ranges=[((1, 2, 3), (2, 2, 3), (3, 2, 3))],
        ),
    ]
    self.run_test_with_dynamic_shape(Unsqueeze(), input_specs, expected_ops={acc_ops.unsqueeze})

def test_tanh_with_dynamic_shape(self):
    class Tanh(nn.Module):
        def forward(self, x):
            return torch.tanh(x)

    input_specs = [
        InputTensorSpec(
            shape=(-1, -1, -1),
            dtype=torch.float32,
            shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
        ),
    ]
    self.run_test_with_dynamic_shape(Tanh(), input_specs, expected_ops={acc_ops.tanh})

def test_elementwise_op_with_dynamic_shape(
    self, _, x_shape, x_shape_ranges, y_shape, y_shape_ranges, orig_op, expected_op
):
    class Op(nn.Module):
        def forward(self, x, y):
            return orig_op(x, y)

    input_specs = [
        InputTensorSpec(
            shape=x_shape,
            dtype=torch.float32,
            shape_ranges=[x_shape_ranges],
        ),
        InputTensorSpec(
            shape=y_shape,
            dtype=torch.float32,
            shape_ranges=[y_shape_ranges],
        ),
    ]
    self.run_test_with_dynamic_shape(Op(), input_specs, expected_ops={expected_op})

def test_permute_with_dynamic_shape(self):
    class Permute(nn.Module):
        def forward(self, x):
            return x.permute(1, 2, 0)

    input_specs = [
        InputTensorSpec(
            shape=(-1, -1, -1),
            dtype=torch.float32,
            shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
        ),
    ]
    self.run_test_with_dynamic_shape(Permute(), input_specs, expected_ops={acc_ops.permute})

def test_quantize_per_tensor_with_dynamic_shape(self):
    class TestModule(nn.Module):
        def forward(self, x):
            return torch.quantize_per_tensor(x, 1, 0, torch.quint8)

    input_specs = [
        InputTensorSpec(
            shape=(-1, -1, -1),
            dtype=torch.float32,
            shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
        ),
    ]
    self.run_test_with_dynamic_shape(
        TestModule(), input_specs, expected_ops={acc_ops.quantize_per_tensor}
    )

def test_size_dynamic_shape(self):
    class Size(nn.Module):
        def forward(self, x):
            bs = x.size(0)
            return x.view(bs, -1)

    input_specs = [
        InputTensorSpec(
            shape=(-1, 12, 32),
            dtype=torch.float32,
            shape_ranges=[((1, 12, 32), (3, 12, 32), (100, 12, 32))],
        ),
    ]
    self.run_test_with_dynamic_shape(Size(), input_specs, expected_ops={acc_ops.size})

def test_conv2d_with_dynamic_shape(self):
    class TestModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 6, 1)

        def forward(self, x):
            return self.conv(x)

    input_specs = [
        InputTensorSpec(
            shape=(-1, 3, -1, -1),
            dtype=torch.float32,
            shape_ranges=[((1, 3, 1, 1), (1, 3, 4, 4), (32, 3, 128, 128))],
        ),
    ]
    self.run_test_with_dynamic_shape(TestModule(), input_specs, expected_ops={acc_ops.conv2d})

def test_adaptive_avgpool_with_dynamic_shape(self):
    class TestModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.pool = torch.nn.AdaptiveAvgPool2d((64, 64))

        def forward(self, x):
            return self.pool(x)

    input_specs = [
        InputTensorSpec(
            shape=(-1, -1, 256, 256),
            dtype=torch.float32,
            shape_ranges=[((1, 1, 256, 256), (3, 3, 256, 256), (5, 5, 256, 256))],
        ),
    ]
    self.run_test_with_dynamic_shape(
        TestModule(), input_specs, expected_ops={acc_ops.adaptive_avg_pool2d}
    )

def test_reshape_with_dynamic_shape(self, target_shape):
    class TestModule(torch.nn.Module):
        def __init__(self, target_shape):
            super().__init__()
            self.target_shape = target_shape

        def forward(self, x):
            return torch.reshape(x, self.target_shape)

    input_specs = [
        InputTensorSpec(
            shape=(-1, -1, -1),
            dtype=torch.float32,
            shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
        ),
    ]
    self.run_test_with_dynamic_shape(
        TestModule(target_shape), input_specs, expected_ops={acc_ops.reshape}
    )

def test_max_pool2d_with_dynamic_shape(self):
    class TestModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.max_pool = torch.nn.MaxPool2d(1, 1)

        def forward(self, x):
            return self.max_pool(x)

    input_specs = [
        InputTensorSpec(
            shape=(-1, -1, -1, -1),
            dtype=torch.float32,
            shape_ranges=[((1, 1, 1, 1), (1, 2, 4, 4), (2, 4, 4, 4))],
        ),
    ]
    self.run_test_with_dynamic_shape(TestModule(), input_specs, expected_ops={acc_ops.max_pool2d})

def test_batchnorm_with_dynamic_shape(self):
    class TestModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.bn = torch.nn.BatchNorm2d(3)

        def forward(self, x):
            return self.bn(x)

    input_specs = [
        InputTensorSpec(
            shape=(-1, 3, -1, -1),
            dtype=torch.float32,
            shape_ranges=[((1, 3, 1, 1), (1, 3, 5, 5), (2, 3, 10, 10))],
        ),
    ]
    self.run_test_with_dynamic_shape(TestModule(), input_specs, expected_ops={acc_ops.batch_norm})

def test_layer_norm_with_dynamic_shape(self, _, normalized_shape, input_shape):
    class LayerNorm(nn.Module):
        def __init__(self, normalized_shape):
            super().__init__()
            self.mod = nn.LayerNorm(normalized_shape, eps=1e-02)

        def forward(self, x):
            return self.mod(x)

    input_specs = [
        InputTensorSpec(
            shape=(-1,) + input_shape,
            dtype=torch.float32,
            shape_ranges=[
                ((1,) + input_shape, (4,) + input_shape, (10,) + input_shape)
            ],
        ),
    ]
    self.run_test_with_dynamic_shape(
        LayerNorm(normalized_shape), input_specs, expected_ops={acc_ops.layer_norm}
    )

def test_linear_with_dynamic_shape(self):
    class TestModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(512, 256)

        def forward(self, x):
            return self.linear(x)

    input_specs = [
        InputTensorSpec(
            shape=(-1, 3, 512),
            dtype=torch.float32,
            shape_ranges=[((1, 3, 512), (3, 3, 512), (4, 3, 512))],
        ),
    ]
    self.run_test_with_dynamic_shape(
        TestModule(),
        input_specs,
        expected_ops={acc_ops.linear},
    )

def test_flatten_with_dynamic_shape(self, _, start_dim, end_dim):
    class Flatten(nn.Module):
        def __init__(self, start, end):
            super().__init__()
            self.start = start
            self.end = end

        def forward(self, x):
            return torch.flatten(x, self.start, self.end)

    input_specs = [
        InputTensorSpec(
            shape=(-1, -1, -1, -1, -1),
            dtype=torch.float32,
            shape_ranges=[((1, 1, 1, 1, 1), (1, 2, 3, 2, 1), (3, 3, 3, 3, 3))],
        ),
    ]
    self.run_test_with_dynamic_shape(
        Flatten(start_dim, end_dim),
        input_specs,
        expected_ops={acc_ops.flatten},
    )

def test_tile_with_dynamic_shape(self, _, shape, dims):
    class Tile(nn.Module):
        def __init__(self, dims):
            super().__init__()
            self.dims = dims

        def forward(self, x):
            return torch.tile(x, self.dims)

    input_specs = [
        InputTensorSpec(
            shape=shape,
            dtype=torch.float32,
            shape_ranges=[
                (
                    tuple(i if i != -1 else 1 for i in shape),
                    tuple(i if i != -1 else 2 for i in shape),
                    tuple(i if i != -1 else 3 for i in shape),
                )
            ],
        ),
    ]
    self.run_test_with_dynamic_shape(Tile(dims), input_specs, expected_ops={acc_ops.tile})

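# Every test above encodes the same convention: a -1 in `shape` marks a dynamic
# dimension, and each entry in `shape_ranges` is one (min, opt, max) triple of
# fully concrete shapes bounding those dimensions. A minimal sketch of a helper
# that generalizes the substitution pattern used in test_tile_with_dynamic_shape
# (the helper name and the default range are hypothetical, not part of the harness):
def _make_dynamic_spec(shape, dynamic_range=(1, 2, 3)):
    lo, opt, hi = dynamic_range
    # Substitute each -1 with the min/opt/max value; static dims pass through unchanged.
    return InputTensorSpec(
        shape=shape,
        dtype=torch.float32,
        shape_ranges=[
            (
                tuple(d if d != -1 else lo for d in shape),
                tuple(d if d != -1 else opt for d in shape),
                tuple(d if d != -1 else hi for d in shape),
            )
        ],
    )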