Example #1
 def test_sin(self, skip_to_glow=False):
     # Ensures range is in [-2*pi, 2*pi]
     x = 4 * np.pi * (torch.rand(2, 3, 4) - 0.5)
     utils.compare_tracing_methods(
         SimpleSinModule(), x, fusible_ops={"aten::sin"}, skip_to_glow=skip_to_glow
     )
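The Simple*Module helpers referenced throughout these examples are defined elsewhere in the test suite. As an assumption based on the naming convention, each is a thin torch.nn.Module wrapper around the corresponding ATen op; a minimal sketch of SimpleSinModule (the other wrappers presumably follow the same pattern):

    import torch

    # Hypothetical sketch: a thin wrapper around torch.sin. Tracing its
    # forward produces a graph containing aten::sin, which is the node
    # that fusible_ops={"aten::sin"} asserts gets fused onto Glow.
    class SimpleSinModule(torch.nn.Module):
        def forward(self, x):
            return torch.sin(x)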
Example #2
 def test_sum(self, _, module, a):
     utils.compare_tracing_methods(module, a, fusible_ops={"aten::sum"})
Example #3
 def test_transpose(self, _, module, tensor, reference=None):
     utils.compare_tracing_methods(module,
                                   tensor,
                                   fusible_ops={"aten::transpose"})
Example #4
    def test_embedding_bag_rowwise_offsets(
        self,
        name,
        num_lengths,
        is4bit,
        is_weighted,
        use_fp16,
        per_sample_weights_fp16,
    ):
        """Test of quantized::embedding_bag_byte_rowwise_offsets and
        quantized::embedding_bag_4bit_rowwise_offsets node on glow"""
        check_skip(self)

        class TestModule(torch.nn.Module):
            def __init__(self,
                         q_weights,
                         is4bit=False,
                         per_sample_weights=None):
                super().__init__()
                self.q_weights = q_weights
                self.per_sample_weights = per_sample_weights
                if is4bit:
                    self.op = torch.ops.quantized.embedding_bag_4bit_rowwise_offsets
                else:
                    self.op = torch.ops.quantized.embedding_bag_byte_rowwise_offsets

            def forward(self, indices, offsets):
                return self.op(
                    self.q_weights,
                    indices,
                    offsets,
                    mode=0,
                    per_sample_weights=self.per_sample_weights,
                    include_last_offset=True,
                )

        # generate random weights and indices
        num_embeddings = 16
        embedding_dim = 4
        weights = torch.from_numpy((np.random.random_sample(
            (num_embeddings, embedding_dim)) + 1).astype(np.float32))
        q_weights = (torch.ops.quantized.embedding_bag_4bit_prepack(weights)
                     if is4bit else
                     torch.ops.quantized.embedding_bag_byte_prepack(weights))
        np_lengths = (np.zeros(shape=[10]).astype(np.int32)
                      if num_lengths == 0 else np.random.randint(
                          0, num_lengths, size=10).astype(np.int32))
        num_lengths = np.sum(np_lengths)
        lengths = torch.from_numpy(np_lengths)
        indices = torch.from_numpy(
            np.random.randint(low=0,
                              high=num_embeddings,
                              size=num_lengths,
                              dtype=np.int64)).long()
        offsets = torch.cat([torch.zeros([1]),
                             torch.cumsum(lengths, 0)]).long()

        per_sample_weights_type = (np.float16 if per_sample_weights_fp16
                                   and is4bit else np.float32)
        per_sample_weights = torch.from_numpy(
            np.random.uniform(low=0.01, high=0.5,
                              size=[len(indices)
                                    ]).astype(per_sample_weights_type))

        m = TestModule(q_weights, is4bit,
                       per_sample_weights if is_weighted else None)

        utils.compare_tracing_methods(
            m,
            indices,
            offsets,
            fusible_ops={
                "quantized::embedding_bag_4bit_rowwise_offsets"
                if is4bit else "quantized::embedding_bag_byte_rowwise_offsets"
            },
            fp16=use_fp16,
            # The FP16 version is known to yield different results, so this
            # test mainly exercises the flow rather than numerical accuracy;
            # accuracy of the lowered modules is covered by additional tests.
            atol=0.02 if (is4bit or use_fp16) else 5e-4,
        )
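For reference, the lengths-to-offsets construction used above behaves as follows (a small worked illustration with arbitrary values):

    import torch

    # Mirror of the offsets construction in the test above.
    lengths = torch.tensor([2, 3, 1])
    offsets = torch.cat([torch.zeros([1]), torch.cumsum(lengths, 0)]).long()
    # offsets == tensor([0, 2, 5, 6]): bag i covers indices[offsets[i]:offsets[i+1]],
    # and the trailing 6 is the extra entry that include_last_offset=True expects.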
Example #5
 def test_masked_fill(self, _, module, tensor, mask):
     utils.compare_tracing_methods(module,
                                   tensor,
                                   mask,
                                   fusible_ops={"aten::masked_fill"})
Example #6
    def test_batchnorm_with_weights(self):
        """
        Test of the PyTorch 3D batchnorm Node with weights and biases on Glow.
        """
        class SimpleQuantizedBatchNorm(nn.Module):
            def __init__(
                self,
                C,
                weight,
                bias,
                running_mean,
                running_var,
                in_scale,
                in_zero_point,
                out_scale,
                out_zero_point,
            ):
                super(SimpleQuantizedBatchNorm, self).__init__()
                self.qconfig = my_qconfig
                self.batchnorm = nn.BatchNorm3d(C)
                self.batchnorm.scale = out_scale
                self.batchnorm.zero_point = out_zero_point
                self.batchnorm.weight = nn.Parameter(weight)
                self.batchnorm.bias = nn.Parameter(bias)
                self.batchnorm.running_mean = running_mean
                self.batchnorm.running_var = running_var
                self.relu = nn.ReLU()
                self.q = torch.ao.quantization.QuantStub()
                self.q.scale = in_scale
                self.q.zero_point = in_zero_point
                self.dq = torch.ao.quantization.DeQuantStub()

            def forward(self, x):
                qx = self.q(x)
                qy = self.batchnorm(qx)
                y = self.dq(qy)
                return y

        C = 7
        in_scale = 0.0031
        out_scale = 0.0047
        in_zero_point = -42
        out_zero_point = 23
        weight = torch.ones(C) + torch.rand(C) * 0.001
        bias = torch.rand(C) * 0.0001
        running_mean = torch.zeros(C)
        running_var = torch.ones(C)

        inputs = torch.randn((6, C, 4, 33, 42), requires_grad=False)
        model = SimpleQuantizedBatchNorm(
            C,
            weight,
            bias,
            running_mean,
            running_var,
            in_scale,
            in_zero_point,
            out_scale,
            out_zero_point,
        )
        model.eval()

        utils.compare_tracing_methods(
            model,
            inputs,
            skip_to_glow=True,
        )
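The test references a module-level my_qconfig that is not shown in this snippet. A minimal sketch of what such a qconfig could look like, assuming the standard torch.ao.quantization API (the actual definition in the test suite may differ):

    import torch.ao.quantization

    # Assumption: a plain eager-mode QConfig built from the default observers;
    # the real my_qconfig is defined elsewhere in the test suite.
    my_qconfig = torch.ao.quantization.QConfig(
        activation=torch.ao.quantization.default_observer,
        weight=torch.ao.quantization.default_weight_observer,
    )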
Example #7
    def test_clamp_min(self):
        """Test of the PyTorch clamp_min Node on Glow."""

        utils.compare_tracing_methods(
            SimpleClampMinModel(0.1), torch.randn(7), fusible_ops={"aten::clamp_min"}
        )
Example #8
 def test_topk_basic(self):
     """Test of the PyTorch TopK Node on Glow."""
     utils.compare_tracing_methods(SimpleTopkModel(3),
                                   torch.arange(1.0, 6.0),
                                   fusible_ops={"aten::topk"})
Example #9
 def test_simple(self, module, tensor):
     utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::view"})
Example #10
 def test_arange(self, _, module, dummy):
     """Testing arange with minimum parameters"""
     utils.compare_tracing_methods(module,
                                   dummy,
                                   fusible_ops={"aten::arange"})
Example #11
 def test_fmod(self, _, module, a, b):
     utils.compare_tracing_methods(module, a, b, fusible_ops={"aten::fmod"})
Example #12
 def test_unsqueeze(self, _, module, tensor):
     utils.compare_tracing_methods(module,
                                   tensor,
                                   fusible_ops={"aten::unsqueeze"})
Example #13
 def test_atan(self, skip_to_glow=False):
     x = torch.randn(2, 3, 4)
     utils.compare_tracing_methods(
         SimpleATanModule(), x, fusible_ops={"aten::atan"}, skip_to_glow=skip_to_glow
     )
Example #14
 def test_asin(self, skip_to_glow=False):
     x = torch.rand(2, 3, 4) - 0.5  # Keeps values inside asin's domain [-1, 1]
     utils.compare_tracing_methods(
         SimpleASinModule(), x, fusible_ops={"aten::asin"}, skip_to_glow=skip_to_glow
     )
Example #15
 def test_rsub(self, _, module, tensor, other):
     utils.compare_tracing_methods(module,
                                   tensor,
                                   other,
                                   fusible_ops={"aten::rsub"})
Example #16
 def test_floor_div(self, _, module, left, right):
     utils.compare_tracing_methods(module,
                                   left,
                                   right,
                                   fusible_ops={"aten::floor_divide"})
Example #17
    def test_clamp(self, _, min, max):
        """Test of the PyTorch clamp Node on Glow."""

        utils.compare_tracing_methods(SimpleClampModel(min, max),
                                      torch.randn(7),
                                      fusible_ops={"aten::clamp"})
Example #18
    def test_reciprocal(self):
        """Test of the PyTorch reciprocal Node on Glow."""

        utils.compare_tracing_methods(SimpleReciprocalModel(),
                                      torch.randn(4),
                                      fusible_ops={"aten::reciprocal"})
Example #19
    def test_pow_basic(self, _, power):
        """Test of the PyTorch pow Node on Glow."""

        utils.compare_tracing_methods(SimplePowModule(power),
                                      torch.rand(4) + 5,
                                      fusible_ops={"aten::pow"})
Example #20
 def test_cumsum(self, _, tensor, dim):
     utils.compare_tracing_methods(SimpleCumSumModule(dim),
                                   tensor,
                                   fusible_ops={"aten::cumsum"})
Example #21
 def test_squeeze(self, _, module, tensor):
     utils.compare_tracing_methods(module, tensor)
Example #22
 def test_rsub_as_sub(self, _, module, tensor, other):
     # aten::rsub is normalized to aten::sub
     utils.compare_tracing_methods(module,
                                   tensor,
                                   other,
                                   fusible_ops={"aten::sub"})
Example #23
    def test_conv2d(self, _, module, inputs, filters, bias=None):
        """Basic test of the PyTorch conv3d Node on Glow."""

        utils.compare_tracing_methods(
            module, inputs, filters, fusible_ops={"aten::_convolution"}
        )
Example #24
 def test_split(self, tensor, split_size_or_sections, dimension):
     utils.compare_tracing_methods(
         SimpleSplitModel(split_size_or_sections, dimension), tensor)
Example #25
    def test_sum_basic(self):
        a = torch.randn(2, 3, 4)

        utils.compare_tracing_methods(SimpleSumModule(),
                                      a,
                                      fusible_ops={"aten::sum"})
Example #26
    def test_tanh_inplace(self):
        """Basic test of the PyTorch aten::tanh_ Node on Glow."""

        utils.compare_tracing_methods(SimpleTanhModel(inplace=True),
                                      torch.randn(4),
                                      fusible_ops={"aten::tanh"})
Example #27
 def test_t(self, _, module, tensor):
     utils.compare_tracing_methods(module, tensor, fusible_ops={"aten::t"})
Example #28
 def test_flatten(self, _, module, input):
     utils.compare_tracing_methods(module,
                                   input,
                                   fusible_ops={"aten::flatten"})
Example #29
 def test_transpose_failure(self, _, module, tensor):
     with self.assertRaises(IndexError):
         utils.compare_tracing_methods(module,
                                       tensor,
                                       fusible_ops={"aten::transpose"})
Example #30
    def test_erf_basic(self):
        """Test of the PyTorch erf Node on Glow."""

        utils.compare_tracing_methods(SimpleErfModule(), torch.randn(4))