Example #1
 def forward(self, x):
     """
     x: N x C x T
     """
     if x.dim() != 3:
         raise RuntimeError(
             "{} accepts a 3D tensor as input".format(self.__class__.__name__))
     # N x 1 x 1
     mean = flow.mean(x, (1, 2), keepdim=True)
     var = flow.mean((x - mean) ** 2, (1, 2), keepdim=True)
     # N x C x T
     if self.elementwise_affine:
         x = self.gamma * (x - mean) / flow.sqrt(var + self.eps) + self.beta
     else:
         x = (x - mean) / flow.sqrt(var + self.eps)
     return x
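A minimal NumPy sketch (an illustration, not part of the source) of the statistics this forward computes: mean and variance are taken jointly over the channel and time axes, so each sample is normalized by a single pair of scalars:

import numpy as np

x = np.random.randn(2, 4, 8)                       # N x C x T
mean = x.mean(axis=(1, 2), keepdims=True)          # N x 1 x 1
var = ((x - mean) ** 2).mean(axis=(1, 2), keepdims=True)
y = (x - mean) / np.sqrt(var + 1e-8)
# each sample is now zero-mean, unit-variance over (C, T)
assert np.allclose(y.mean(axis=(1, 2)), 0, atol=1e-6)
assert np.allclose(y.var(axis=(1, 2)), 1, atol=1e-4)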
Example #2
    def forward(self, inputs, targets):
        """
        Args:
            inputs (torch.Tensor): feature matrix with shape (batch_size, feat_dim).
            targets (torch.LongTensor): ground truth labels with shape (batch_size,).
        """
        n = inputs.size(0)

        # Compute pairwise Euclidean distance via
        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b;
        # replace with the official op once it is merged.
        dist = flow.pow(inputs, 2).sum(dim=1).expand(n, n)
        dist = dist + flow.transpose(dist, dim0=1, dim1=0)
        temp1 = -2 * flow.matmul(inputs, flow.transpose(inputs, dim0=1,
                                                        dim1=0))
        dist = flow.add(dist, temp1)
        dist = flow.sqrt(flow.clamp(dist, min=1e-12))
        # For each anchor, find the hardest positive and negative
        mask = targets.expand(n, n).eq(
            flow.transpose(targets.expand(n, n), dim0=1, dim1=0))
        dist_ap, dist_an = [], []
        # Fill values for masked selection: zeros can never win the max over
        # positives, and exp(100) (effectively +inf) can never win the min
        # over negatives.
        y1 = flow.zeros((1, n), dtype=flow.float32).to("cuda")
        y2 = flow.Tensor(np.exp(100 * np.ones((1, n)))).to("cuda")

        for i in range(n):
            temp_dist = flow.slice(dist, [(i, i + 1, 1)])
            temp_mask = flow.slice(mask, [(i, i + 1, 1)])
            temp_mask_rev = flow.slice(1 - mask, [(i, i + 1, 1)])
            dist_ap.append(temp_mask.where(temp_dist, y1).max().unsqueeze(0))
            dist_an.append(
                temp_mask_rev.where(temp_dist, y2).min().unsqueeze(0))
        dist_ap = flow.cat(dist_ap)
        dist_an = flow.cat(dist_an)

        # Compute ranking hinge loss
        y = flow.ones_like(dist_an)
        return self.ranking_loss(dist_an, dist_ap, y)
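A hedged NumPy sketch of the same mining step, showing why y1 (zeros) and y2 (exp(100), effectively infinity) are used as fill values: zeros can never win the max over positives, and the huge constant can never win the min over negatives. Names and shapes here are illustrative:

import numpy as np

dist = np.random.rand(4, 4)                    # pairwise distances
labels = np.array([0, 0, 1, 1])
mask = labels[:, None] == labels[None, :]      # True where labels match
BIG = np.exp(100)                              # effectively +inf, as in y2

dist_ap = np.where(mask, dist, 0.0).max(axis=1)    # hardest positive per row
dist_an = np.where(~mask, dist, BIG).min(axis=1)   # hardest negative per row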
Example #3
 def test_sqrt(test_case):
     input_arr = np.random.randn(3, 2, 5, 7)
     np_out = np.sqrt(input_arr)
     x = flow.Tensor(input_arr)
     of_out = flow.sqrt(input=x)
     test_case.assertTrue(
         np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5, equal_nan=True))
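Since np.random.randn draws negative values, both np.sqrt and flow.sqrt produce NaN there, and NaN never compares equal to NaN by default; equal_nan=True is what lets the test pass. A standalone NumPy illustration of that detail:

import numpy as np

a = np.sqrt(np.array([-1.0, 4.0]))        # [nan, 2.], with a RuntimeWarning
b = a.copy()
assert not np.allclose(a, b)              # NaN != NaN by default
assert np.allclose(a, b, equal_nan=True)  # NaNs compared as equal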
Example #4
    def forward(self, x):

        if hasattr(self, "qi"):
            self.qi.update(x)
            x = self.qi.fake_quantize_tensor(x)

        if self.training:
            y = flow.nn.functional.conv2d(
                x,
                self.conv_module.weight,
                self.conv_module.bias,
                stride=self.conv_module.stride,
                padding=self.conv_module.padding,
                dilation=self.conv_module.dilation,
                groups=self.conv_module.groups,
            )
            y = y.permute(1, 0, 2, 3)  # NCHW -> CNHW
            y = y.view(self.conv_module.out_channels, -1)  # CNHW -> C,NHW
            mean = y.mean(1).detach()
            var = y.var(1).detach()
            # NOTE: momentum here weights the *old* running statistic (the
            # reverse of torch.nn.BatchNorm's convention, where momentum
            # weights the new batch statistic).
            self.bn_module.running_mean = (
                self.bn_module.momentum * self.bn_module.running_mean +
                (1 - self.bn_module.momentum) * mean)
            self.bn_module.running_var = (
                self.bn_module.momentum * self.bn_module.running_var +
                (1 - self.bn_module.momentum) * var)
        else:
            mean = flow.Tensor(self.bn_module.running_mean)
            var = flow.Tensor(self.bn_module.running_var)

        std = flow.sqrt(var + self.bn_module.eps)

        weight, bias = self.fold_bn(mean, std)

        self.qw.update(weight.data)

        x = flow.nn.functional.conv2d(
            x,
            self.qw.fake_quantize_tensor(weight),
            bias,
            stride=self.conv_module.stride,
            padding=self.conv_module.padding,
            dilation=self.conv_module.dilation,
            groups=self.conv_module.groups,
        )

        if hasattr(self, "qo"):
            self.qo.update(x)
            x = self.qo.fake_quantize_tensor(x)

        return x
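fold_bn is not shown above; the standard batch-norm folding identity it presumably implements is w_fold = gamma * w / std and b_fold = beta + gamma * (b - mean) / std, so that conv followed by BN equals a single conv with folded parameters. A scalar NumPy check of that identity (a sketch under that assumption, not the source's fold_bn):

import numpy as np

w, b = 0.7, 0.2              # conv weight and bias (scalar stand-ins)
gamma, beta = 1.5, -0.3      # BN affine parameters
mean, var, eps = 0.4, 0.25, 1e-5
std = np.sqrt(var + eps)

x = 2.0
conv_then_bn = gamma * (w * x + b - mean) / std + beta
w_fold = gamma * w / std
b_fold = beta + gamma * (b - mean) / std
assert np.isclose(conv_then_bn, w_fold * x + b_fold)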
Example #5
    def gradient_penalty(self, y, x):
        """Compute gradient penalty: (L2_norm(dy/dx) - 1)**2."""
        weight = flow.ones(y.size()).to(self.device)

        dydx = flow.autograd.grad(outputs=y,
                                  inputs=x,
                                  out_grads=weight,
                                  retain_graph=True,
                                  create_graph=True)[0]

        dydx = dydx.view(dydx.size(0), -1)
        dydx_l2norm = flow.sqrt(flow.sum(dydx**2, dim=1))

        return flow.mean((dydx_l2norm - 1)**2)
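The reduction at the end is just mean((||g||_2 - 1)^2) over per-sample gradients. A NumPy sketch of that step alone, given an already-computed gradient batch (names illustrative):

import numpy as np

dydx = np.random.randn(8, 3 * 64 * 64)     # flattened per-sample gradients
l2 = np.sqrt((dydx ** 2).sum(axis=1))      # L2 norm per sample
penalty = ((l2 - 1.0) ** 2).mean()         # WGAN-GP gradient penalty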
Example #6
 def _forward(self, x):
     axis = 1
     params_shape = [x.shape[axis]]
     weight = self.weight
     bias = self.bias
     nd_params_shape = [1] * len(x.shape)
     nd_params_shape[axis] = params_shape[0]
     # statistics over the last axis, per sample and per channel
     mean = x.mean(2, keepdim=True)
     variance = x.var(2, unbiased=False, keepdim=True)
     normalized = (x - mean) / flow.sqrt(variance + self.eps)
     if self.weight is not None and params_shape[0] == self.weight.nelement():
         weight = flow.reshape(self.weight, shape=nd_params_shape)
     if self.bias is not None and params_shape[0] == self.bias.nelement():
         bias = flow.reshape(self.bias, shape=nd_params_shape)
     if self.weight is not None:
         normalized = normalized * weight
     if self.bias is not None:
         normalized = normalized + bias
     return normalized
Example #7
 def forward(self, input: Tensor) -> Tensor:
     assert len(input.shape) >= 3, \
         "The input tensor must have more than 2 dimensions"
     assert input.shape[1] == self.num_channels, \
         "The channel dimension of the input must equal num_channels"
     origin_shape = input.shape
     reshape_to_1d = flow.reshape(
         input, shape=[origin_shape[0], self.num_groups, -1])
     mean = flow.mean(reshape_to_1d, dim=2, keepdim=True)
     variance = flow.var(reshape_to_1d, dim=2, unbiased=False, keepdim=True)
     normalized = (reshape_to_1d - mean) / flow.sqrt(variance + self.eps)
     normalized = flow.reshape(
         normalized, shape=[origin_shape[0], self.num_channels, -1])
     if self.weight is not None:
         normalized = normalized * self.weight.reshape(
             1, self.num_channels, 1)
     if self.bias is not None:
         normalized = normalized + self.bias.reshape(
             1, self.num_channels, 1)
     res = flow.reshape(normalized, shape=tuple(input.shape))
     return res
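A NumPy reference for the same group statistics: reshape to (N, num_groups, -1), normalize within each group, and reshape back (shapes illustrative):

import numpy as np

N, C, H, W, G = 2, 6, 4, 4, 3
eps = 1e-5
x = np.random.randn(N, C, H, W)

g = x.reshape(N, G, -1)                    # N x G x (C//G * H * W)
mean = g.mean(axis=2, keepdims=True)
var = g.var(axis=2, keepdims=True)         # biased, matching unbiased=False
y = ((g - mean) / np.sqrt(var + eps)).reshape(N, C, H, W)
assert np.allclose(y.reshape(N, G, -1).mean(axis=2), 0, atol=1e-6)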
Example #8
def _sqrt(self):
    return flow.sqrt(self)
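Presumably this helper backs the sqrt method on flow.Tensor (the registration itself is not shown here), so x.sqrt() and flow.sqrt(x) agree:

import oneflow as flow

x = flow.Tensor([1.0, 4.0, 9.0])
print(x.sqrt().numpy())        # [1. 2. 3.]
print(flow.sqrt(x).numpy())    # same result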
Example #9
 def forward(self, x):
     mean = x.mean(-1, keepdim=True)
     # mean of squared deviations, i.e. the biased variance (not the std)
     var = (x - mean).pow(2).mean(-1, keepdim=True)
     x = (x - mean) / flow.sqrt(var + self.eps)
     return self.weight * x + self.bias