Example #1: dynamic slicing with a runtime-dependent start index

# Imports assumed from the coremltools test suite; the exact module paths
# may differ between coremltools versions.
import numpy as np
import pytest
import torch

from coremltools import TensorType
from coremltools.models.utils import _python_version
from coremltools.converters.mil.testing_reqs import backends
from coremltools.converters.mil.frontend.torch.test.testing_utils import run_compare_torch


class TestSlice:
    @pytest.mark.skipif(_python_version() < (3, 6),
                        reason="requires python 3.6")
    @pytest.mark.parametrize(
        "backend",
        backends,
    )
    def test_dynamic_slice(self, backend):
        class DynamicSlicer(torch.nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x, context_length):
                # Slice along dim 0 from a runtime-dependent start index.
                return x[context_length:, :, :]

        class Model(torch.nn.Module):
            def __init__(self):
                super().__init__()
                # Embedding(num_embeddings, embedding_dim, padding_idx)
                self.tokens_embedding = torch.nn.Embedding(10, 10, 0)
                self.context_embedding = torch.nn.Embedding(10, 10, 0)
                self.dynamic_slicer = DynamicSlicer()

            def forward(self, tokens, context, context_length):
                tokens_embeddings = self.tokens_embedding(tokens)
                context_embeddings = self.context_embedding(context)
                # Prepend the context embeddings, then slice them back off
                # using the runtime-provided context_length.
                embeddings = torch.cat((context_embeddings, tokens_embeddings),
                                       dim=0)
                embeddings = self.dynamic_slicer(embeddings, context_length)
                return embeddings

        model = Model()
        batch_size = 5
        inputs = [
            TensorType(name="tokens", shape=(10, batch_size), dtype=np.int64),
            TensorType(name="context", shape=(3, batch_size), dtype=np.int64),
            TensorType(name="context_length", shape=(), dtype=np.int32),
        ]
        run_compare_torch(inputs,
                          model,
                          rand_range=(0, 8),
                          backend=backend,
                          use_scripting=False)
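
For reference, the model above can be sanity-checked in plain PyTorch,
independent of the run_compare_torch harness. This is our own check, not part
of the original test; it assumes the Model class from Example #1 is in scope,
and the shapes mirror the TensorType inputs above.

import torch

model = Model().eval()
tokens = torch.randint(0, 8, (10, 5))   # (sequence, batch)
context = torch.randint(0, 8, (3, 5))   # (context, batch)
context_length = torch.tensor(3)        # 0-dim tensor, matches shape=()

with torch.no_grad():
    out = model(tokens, context, context_length)

# cat yields 3 + 10 = 13 rows along dim 0; slicing from context_length
# leaves the 10 token rows, each of shape (batch, embedding_dim).
assert out.shape == (10, 5, 10)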
Example #2: converting common activation layers

# Imports assumed from the coremltools test suite, as in Example #1; the
# exact module paths may differ between coremltools versions.
import itertools

import numpy as np
import pytest
import torch
import torch.nn as nn

from coremltools.models.utils import _macos_version, _python_version
from coremltools.converters.mil.testing_reqs import backends
from coremltools.converters.mil.frontend.torch.test.testing_utils import (
    ModuleWrapper,
    run_compare_torch,
)


class TestActivation:
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_relu(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.ReLU().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

        model = ModuleWrapper(nn.functional.relu_)
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_relu6(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.ReLU6().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, alpha", itertools.product(backends, [0.1, 0.25, 2.0]),
    )
    def test_prelu(self, backend, alpha):
        input_shape = tuple(np.random.randint(low=5, high=10, size=4))
        C = input_shape[1]
        model = nn.PReLU(C, alpha).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank, alpha",
        itertools.product(backends, range(1, 6), [0.1, 2.0, 1.5]),
    )
    def test_leaky_relu(self, backend, rank, alpha):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.LeakyReLU(negative_slope=alpha).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

        model = ModuleWrapper(nn.functional.leaky_relu_, {'negative_slope': alpha})
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_softmax(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Softmax().eval()  # implicit dim; newer PyTorch warns here
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank, range_val",
        itertools.product(
            backends, range(1, 6), [(-1.0, 1.0), (0.0, 0.1), (1.0, 3.0), (-1.0, 6.0)]
        ),
    )
    def test_hardtanh(self, backend, rank, range_val):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Hardtanh(range_val[0], range_val[1]).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

        model = ModuleWrapper(nn.functional.hardtanh_,
                              {'min_val': range_val[0], 'max_val': range_val[1]})
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank, alpha",
        itertools.product(backends, range(1, 6), [0.1, 2.0, 1.5]),
    )
    def test_elu(self, backend, rank, alpha):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.ELU(alpha).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    # rdar://problem/66557565: run against the nn_proto backend only
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(['nn_proto'], range(1, 6)),
    )
    def test_gelu(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.GELU().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_erf(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))

        class ERFActivation(nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x):
                return torch.erf(x)

        model = ERFActivation().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_sigmoid(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Sigmoid().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.skipif(_python_version() < (3, 6), reason="requires python 3.6")
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(backends, range(1, 6)),
    )
    def test_sigmoid_hard(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Hardsigmoid().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    @pytest.mark.parametrize(
        "backend, beta, threshold", itertools.product(backends, [1, 2, 5], [5, 10, 20]),
    )
    @pytest.mark.skipif(
        _macos_version() <= (11,),
        reason="Parametric SoftPlus segfaults on macOS 10.15 and below. (rdar://problem/66555235)",
    )
    def test_softplus(self, backend, beta, threshold):
        input_shape = (1, 10, 5, 15)
        model = nn.Softplus(beta, threshold).eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )

    # rdar://problem/66557565: run against the nn_proto backend only
    @pytest.mark.parametrize(
        "backend, rank", itertools.product(['nn_proto'], range(1, 6)),
    )
    def test_softsign(self, backend, rank):
        input_shape = tuple(np.random.randint(low=1, high=10, size=rank))
        model = nn.Softsign().eval()
        run_compare_torch(
            input_shape, model, backend=backend,
        )
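
Several of these tests wrap in-place functional ops (relu_, leaky_relu_,
hardtanh_) in ModuleWrapper so they can be traced like any other nn.Module.
The real helper lives in the coremltools test utilities; the following is a
minimal sketch of the pattern, written here as an assumption rather than the
actual implementation.

import torch.nn as nn

class ModuleWrapper(nn.Module):
    # Wraps a functional op plus fixed kwargs as a traceable nn.Module.
    # Sketch only; the coremltools version may differ in detail.
    def __init__(self, function, kwargs=None):
        super().__init__()
        self.function = function
        self.kwargs = kwargs or {}

    def forward(self, x):
        return self.function(x, **self.kwargs)

With such a wrapper, ModuleWrapper(nn.functional.leaky_relu_,
{'negative_slope': alpha}) traces the same graph as nn.LeakyReLU(alpha) while
exercising the in-place functional path.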