Example 1
    def forward(self, x, stage):
        '''
            For alpha in [0, 1) and 2*k+2 + alpha < self.max_stage (k >= -1):
            stage 0 + alpha       : p <-        block[0] <- in[0] * 1
            stage 2*k+1 + alpha   : p <- ... <- block[k] <- (in[k] <- down) * (1 - alpha)
                                    .................... <- (block[k+1] <- in[k+1]) * (alpha)
            stage 2*k+2 + alpha   : p <- ............... <- (block[k+1] <- in[k+1]) * 1
            Stages beyond max_stage continue in the same way.
        '''
        stage = min(stage, self.max_stage - 1e-8)
        alpha = stage - math.floor(stage)
        stage = math.floor(stage)

        h = x
        if stage % 2 == 0:
            # stable stage 2*k+2: feed the current-resolution input at full weight
            k = (stage - 2) // 2
            h = F.leaky_relu(self.ins[k + 1](h))
            for i in reversed(range(0, (k + 1) + 1)):  # k+1 .. 0
                h = self.blocks[i](h)
        else:
            # transition stage 2*k+1: fade in block[k+1] against the downscaled path
            k = (stage - 1) // 2

            h_0 = F.leaky_relu(self.ins[k](downscale2x(h)))
            h_1 = self.blocks[k + 1](F.leaky_relu(self.ins[k + 1](x)))
            assert 0. <= alpha < 1.
            h = (1.0 - alpha) * h_0 + alpha * h_1

            for i in reversed(range(0, k + 1)):  # k .. 0
                h = self.blocks[i](h)

        # normalize each paired slice (h[:, -9:-6], h[:, -6:-3]) to unit norm,
        # i.e. cos**2 + sin**2 = 1 for the predicted camera parameters (cf. Example 2)
        inv_norm = F.rsqrt(
            F.square(h[:, -9:-6]) + F.square(h[:, -6:-3]) + 1e-8)
        camera_param = F.concat(
            [h[:, -9:-6] * inv_norm, h[:, -6:-3] * inv_norm, h[:, -3:]])
        return h[:, :-9], camera_param
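To make the stage arithmetic above concrete, here is a small standalone sketch. stage_to_blend and max_stage=9 are illustrative names and values, not part of the original class; the sketch only mirrors how forward() splits a fractional stage into a block index k and a fade-in weight alpha.

import math

def stage_to_blend(stage, max_stage=9):
    # Illustrative helper: reproduce the stage -> (k, blend weights) mapping above.
    stage = min(stage, max_stage - 1e-8)
    alpha = stage - math.floor(stage)
    stage = math.floor(stage)
    if stage % 2 == 0:
        return (stage - 2) // 2, None              # stable phase: block[k+1] at weight 1
    return (stage - 1) // 2, (1.0 - alpha, alpha)  # transition: (old path, new block)

for s in (0.0, 1.25, 2.0, 3.75):
    print(s, stage_to_blend(s))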
Example 2
    def forward(self, z):
        camera_param = self.net(z)
        # normalize to cos**2 + sin**2 = 1
        inv_norm = F.rsqrt(
            F.square(camera_param[:, :3]) + F.square(camera_param[:, 3:6]) +
            1e-8)
        camera_param = F.concat([
            camera_param[:, :3] * inv_norm, camera_param[:, 3:6] * inv_norm,
            camera_param[:, 6:]
        ])
        return camera_param
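For reference, a NumPy rendering of the same pairwise normalization. The layout of camera_param (three cosines, three sines, then the remaining parameters) is inferred from the comment above; the array values here are arbitrary.

import numpy as np

camera_param = np.random.randn(4, 9).astype(np.float32)   # assumed layout: [cos x3, sin x3, rest]
inv_norm = 1.0 / np.sqrt(camera_param[:, :3] ** 2 + camera_param[:, 3:6] ** 2 + 1e-8)
normalized = np.concatenate(
    [camera_param[:, :3] * inv_norm,
     camera_param[:, 3:6] * inv_norm,
     camera_param[:, 6:]], axis=1)
print(normalized[:, :3] ** 2 + normalized[:, 3:6] ** 2)    # each pair sums to ~1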
Example 3
    def forward(self, inputs, device):
        x, y = inputs

        mean = functions.mean(x, axis=1)
        d = x - mean[:, None]
        var = functions.mean(d * d, axis=1)
        inv_std = functions.rsqrt(var + self.eps)

        dummy_gamma = self.backend_config.xp.ones(self.shape[0],
                                                  dtype=self.dtype)

        return gn_module._MulInvStd(self.eps, mean.array, inv_std.array,
                                    dummy_gamma).apply((x, y))
Example 4
    def forward(self, inputs, device):
        x, y = inputs

        mean = functions.mean(x, axis=1)
        d = x - mean[:, None]
        var = functions.mean(d * d, axis=1)
        inv_std = functions.rsqrt(var + self.eps)

        dummy_gamma = self.backend_config.xp.ones(
            self.shape[0], dtype=self.dtype)

        return gn_module._MulInvStd(
            self.eps, mean.array, inv_std.array, dummy_gamma).apply((x, y))
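The two nearly identical helpers above feed Chainer's internal group-normalization primitive _MulInvStd; the statistics they prepare are plain per-row standardization terms. A minimal NumPy sketch of just that part, with eps and the array shape assumed for illustration:

import numpy as np

eps = 1e-5                                   # assumed value of self.eps
x = np.random.randn(4, 8).astype(np.float32)
mean = x.mean(axis=1)
d = x - mean[:, None]
var = (d * d).mean(axis=1)
inv_std = 1.0 / np.sqrt(var + eps)
print((d * inv_std[:, None]).var(axis=1))    # ~1: each row is standardized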
Example 5
    def __call__(self, batch):
        """
        Input
        -----
        batch: Input Variable of shape [N, hidden_dim]
        """
        # calc mini-batch squared mean
        nu = F.mean(F.square(batch), axis=0)

        # Normalize
        sig_hat = F.rsqrt(F.bias(nu, self.eps))
        activated = F.scale(batch, sig_hat)

        # shift
        shift = F.bias(F.scale(activated, self.gamma), self.beta)

        # TRU
        return F.maximum(shift, self.tau)
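A NumPy rendering of the same steps, assuming per-feature gamma, beta and tau of shape [hidden_dim] and eps = 1e-5 (none of these values are given above); it illustrates the RMS-style normalization over the mini-batch followed by the thresholded (TRU) activation.

import numpy as np

eps = 1e-5
batch = np.random.randn(16, 8).astype(np.float32)
gamma, beta, tau = np.ones(8), np.zeros(8), np.zeros(8)

nu = (batch ** 2).mean(axis=0)               # mini-batch squared mean per feature
activated = batch / np.sqrt(nu + eps)        # scale by the inverse root mean square
shift = activated * gamma + beta             # learned scale and shift
out = np.maximum(shift, tau)                 # thresholded rectification
print(out.shape)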
Example 6
    def test_rsqrt(self):
        x = numpy.random.uniform(0.1, 5, (3, 2)).astype(numpy.float32)
        testing.assert_allclose(F.rsqrt(x).data, rsqrt(x))
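The rsqrt reference used by this assertion is defined elsewhere in the test module; a minimal stand-in consistent with the check would be the following (the exact signature in the real test file may differ).

import numpy

def rsqrt(x, dtype=numpy.float32):
    # assumed reference implementation: elementwise 1 / sqrt(x)
    return numpy.reciprocal(numpy.sqrt(x, dtype=dtype))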
Example 7
def vnorm_e(a):
    return a * F.rsqrt(vdot_e(a, a))
Example 8
def feature_vector_normalization(x, eps=1e-8):
    # x: (B, C, H, W)
    alpha = F.rsqrt(F.mean(x**2, axis=1, keepdims=True) + eps)
    return F.broadcast_to(alpha, x.data.shape) * x
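A quick, illustrative check of the function above: after pixel-wise feature normalization, the mean of the squared activations over the channel axis is approximately one at every spatial location. The input shape is arbitrary.

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.random.randn(2, 16, 4, 4).astype(np.float32))
y = feature_vector_normalization(x)
print(F.mean(y ** 2, axis=1).array)          # ~1 everywhere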
Example 9
    def test_rsqrt(self):
        x = numpy.random.uniform(0.1, 5, (3, 2)).astype(numpy.float32)
        testing.assert_allclose(F.rsqrt(x).data, rsqrt(x))
Example 10
    def rsqrt(self, x):
        return F.rsqrt(x)
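F.rsqrt computes the elementwise reciprocal square root, i.e. 1 / sqrt(x); a minimal usage example with arbitrary values:

import numpy as np
import chainer.functions as F

x = np.array([1.0, 4.0, 9.0], dtype=np.float32)
print(F.rsqrt(x).array)      # [1.0, 0.5, 0.333...]
print(1.0 / np.sqrt(x))      # matches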