    def forward(self, x):
        # map the Euclidean input x onto the Poincare ball: if a trainable
        # reference point xp is used, expmap around it, otherwise expmap
        # around the origin
        if self.train_x:
            xp = pmath.project(pmath.expmap0(self.xp, c=self.c), c=self.c)
            return self.grad_fix(
                pmath.project(pmath.expmap(xp, x, c=self.c), c=self.c))
        return self.grad_fix(
            pmath.project(pmath.expmap0(x, c=self.c), c=self.c))
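# --- Hedged usage sketch (standalone, not part of the original snippet) ---
# What the default (train_x=False) branch computes: Euclidean features are
# lifted onto the Poincare ball with the exponential map at the origin, then
# projected back strictly inside the ball for numerical safety. The import
# path of pmath and all shapes here are illustrative assumptions.
import torch
import hyptorch.pmath as pmath  # assumed location of the pmath used above

c = 1.0                                   # ball curvature (illustrative value)
x_eucl = torch.randn(8, 16) * 0.1         # batch of Euclidean feature vectors
x_ball = pmath.project(pmath.expmap0(x_eucl, c=c), c=c)
assert (x_ball.norm(dim=-1) < 1.0 / c ** 0.5).all()  # strictly inside the ball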
    def forward(self, x, c=None):
        if c is None:
            c = self.c
        mv = pmath.mobius_matvec(self.weight, x, c=c)
        if self.bias is None:
            return pmath.project(mv, c=c)
        else:
            bias = pmath.expmap0(self.bias, c=c)
            return pmath.project(pmath.mobius_add(mv, bias, c=c), c=c)
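# --- Hedged sketch of the Mobius linear layer above (standalone form) ---
# The weight is assumed to follow the nn.Linear convention
# (out_features, in_features); shapes and the pmath import path are
# illustrative assumptions.
import torch
import hyptorch.pmath as pmath  # assumed import path

c = 1.0
weight = torch.randn(5, 16) * 0.05         # Euclidean weight matrix
bias = torch.randn(5) * 0.05               # Euclidean bias, lifted via expmap0
x = pmath.project(pmath.expmap0(torch.randn(8, 16) * 0.1, c=c), c=c)

mv = pmath.mobius_matvec(weight, x, c=c)   # Mobius matrix-vector product
out = pmath.project(pmath.mobius_add(mv, pmath.expmap0(bias, c=c), c=c), c=c)
print(out.shape)                           # torch.Size([8, 5])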
def ker_by_channel(channel, ker, c=None, padding=0):
    # channel: (bs, H, W) maps, each treated as a single point on the Poincare
    # ball; ker: (c_out, k, k) kernels applied in the tangent space at the origin
    channel = nn.ConstantPad2d(padding, 0)(channel)
    c_out, kernel_size, _ = ker.size()
    bs, m1, m2 = channel.size()
    channel = pmath.logmap0(channel.view(bs, -1), c=c).view(bs, m1, m2)
    channel = nn.functional.conv2d(channel.unsqueeze(1), ker.unsqueeze(1), bias=None).view(bs * c_out, -1)
    channel = pmath.expmap0(channel, c=c)
    channel = pmath.project(channel, c=c)
    return channel
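# --- Hedged usage sketch for ker_by_channel (standalone form) ---
# Each (H, W) map is treated as one point on the ball, convolved in the
# tangent space at the origin, and mapped back. Shapes and the pmath import
# path are illustrative assumptions.
import torch
import hyptorch.pmath as pmath  # assumed import path

c = 1.0
bs, m = 4, 8
channel = torch.randn(bs, m, m) * 0.1
channel = pmath.project(pmath.expmap0(channel.view(bs, -1), c=c), c=c).view(bs, m, m)
ker = torch.randn(3, 3, 3) * 0.1           # (c_out, k, k)
out = ker_by_channel(channel, ker, c=c, padding=1)
print(out.shape)                           # (bs * c_out, m * m) = (12, 64)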
    def forward(self, x, c=None):
        if c is None:
            c = torch.as_tensor(self.c).type_as(x)
        else:
            c = torch.as_tensor(c).type_as(x)
        p_vals_poincare = pmath.expmap0(self.p_vals, c=c)
        conformal_factor = (1 - c * p_vals_poincare.pow(2).sum(dim=1, keepdim=True))
        a_vals_poincare = self.a_vals * conformal_factor
        logits = pmath._hyperbolic_softmax(x, a_vals_poincare, p_vals_poincare, c)
        return logits
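# --- Hedged sketch of the hyperbolic MLR head above (standalone form) ---
# Only pmath calls that already appear in the snippet are used; dimensions
# and the pmath import path are illustrative assumptions.
import torch
import hyptorch.pmath as pmath  # assumed import path

c = torch.tensor(1.0)
n_classes, dim = 10, 16
p_vals = torch.randn(n_classes, dim) * 0.05    # learned Euclidean class offsets
a_vals = torch.randn(n_classes, dim) * 0.05    # learned Euclidean class directions
x = pmath.project(pmath.expmap0(torch.randn(4, dim) * 0.1, c=c), c=c)

p_poincare = pmath.expmap0(p_vals, c=c)
conformal = 1 - c * p_poincare.pow(2).sum(dim=1, keepdim=True)
a_poincare = a_vals * conformal
logits = pmath._hyperbolic_softmax(x, a_poincare, p_poincare, c)
print(logits.shape)                            # expected (4, n_classes)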
    def forward(self, x, c=None):
        if c is None:
            c = self.c

        x_eucl = pmath.logmap0(x, c=c)
        out = self.conv(x_eucl)
        x_hyp = pmath.expmap0(out, c=c)
        x_hyp_proj = pmath.project(x_hyp, c=c)

        return x_hyp_proj
    def forward(self, x, c=None):
        if c is None:
            c = self.c

        # note that logmap and expmap are taken with respect to the origin
        x_eucl = pmath.logmap0(x, c=c)
        out = self.lin(x_eucl)
        x_hyp = pmath.expmap0(out, c=c)
        x_hyp_proj = pmath.project(x_hyp, c=c)

        return x_hyp_proj
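# --- Hedged sketch of the shared "tangent-space wrapper" pattern above ---
# Both the conv and the linear wrapper do the same thing: logmap to the
# tangent space at the origin, apply an ordinary Euclidean layer, expmap
# back, and project for numerical safety. The layer choice, shapes, and the
# pmath import path are illustrative assumptions.
import torch
import torch.nn as nn
import hyptorch.pmath as pmath  # assumed import path

c = 1.0
lin = nn.Linear(16, 8)                     # any Euclidean layer works here
x = pmath.project(pmath.expmap0(torch.randn(4, 16) * 0.1, c=c), c=c)

x_eucl = pmath.logmap0(x, c=c)             # ball -> tangent space at the origin
out = lin(x_eucl)                          # Euclidean computation
x_hyp = pmath.project(pmath.expmap0(out, c=c), c=c)   # back onto the ball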
    def forward(self, x, c=None):
        if c is None:
            c = self.c

        # map x back to R^n, apply the conv, then map the result back to hyperbolic space
        x = pmath.logmap0(x.view(x.size(0) * x.size(1), -1), c=c).view(x.size())
        out = self.conv(x)
        out = pmath.expmap0(out.view(out.size(0) * out.size(1), -1), c=c).view(out.size())

        # now add the H^n bias
        if self.bias is None:
            return pmath.project(out.view(out.size(0) * out.size(1), -1), c=c).view(out.size())
        else:
            bias = pmath.expmap0(self.bias, c=c)
            bias = bias.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(out)
            # Mobius-add the bias channel-wise, then project back onto the ball
            interm = pmath.mobius_add(
                out.contiguous().view(out.size(0) * out.size(1), -1),
                bias.contiguous().view(bias.size(0) * bias.size(1), -1),
                c=c).view(out.size())
            normed = pmath.project(interm.view(interm.size(0) * interm.size(1), -1), c=c).view(interm.size())
            return normed
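# --- Hedged note on the per-channel flattening used above (standalone) ---
# pmath treats the last dimension as the ball coordinates, so each (H, W)
# channel map is flattened into a single vector before logmap0/expmap0 and
# restored afterwards. Shapes and the pmath import path are assumptions.
import torch
import hyptorch.pmath as pmath  # assumed import path

c = 1.0
x = torch.randn(2, 3, 8, 8) * 0.1                              # N x C x H x W
x = pmath.project(x.view(2 * 3, -1), c=c).view(2, 3, 8, 8)     # per-channel ball points
x_eucl = pmath.logmap0(x.view(2 * 3, -1), c=c).view(2, 3, 8, 8)
# ...apply any Euclidean conv to x_eucl, then expmap0/project it the same way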
    def forward(self, x):
        if self.train_x:
            xp = pmath.project(pmath.expmap0(self.xp, c=self.c), c=self.c)
            return pmath.logmap(xp, x, c=self.c)
        return pmath.logmap0(x, c=self.c)
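# --- Hedged round-trip sketch for the logmap layer above (standalone) ---
# Mapping to the ball and back with the origin-based maps should recover the
# original Euclidean vector up to floating-point error (for vectors that are
# not clipped by project). Shapes and the pmath import path are assumptions.
import torch
import hyptorch.pmath as pmath  # assumed import path

c = 1.0
v = torch.randn(4, 16) * 0.1
x = pmath.project(pmath.expmap0(v, c=c), c=c)    # Euclidean -> ball
v_back = pmath.logmap0(x, c=c)                   # ball -> tangent space
print(torch.allclose(v, v_back, atol=1e-4))      # expected: True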
    def forward(self, x, c=None):
        if c is None:
            c = self.c

        # do proper normalization of euclidean data
        x = pmath.project(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())

        # BLOCK 1

        x = self.c1(x, c=c)
        # batch norm
        #         x = pmath.logmap0(x, c=c)
        #         x = self.b1(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        # blocked relu and maxpool 2
        #         x = pmath.logmap0(x, c=c)
        #         x = nn.ReLU()(x)
        #         x = nn.MaxPool2d(2)(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        # separate relu and maxpool 2
        x = pmath.logmap0(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())
        x = nn.ReLU()(x)
        x = pmath.expmap0(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())

        x = pmath.project(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())

        x = pmath.logmap0(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())
        x = nn.MaxPool2d(2)(x)
        x = pmath.expmap0(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())

        x = pmath.project(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())

        # BLOCKS 2-4 (self.c2, self.c3, self.c4, each followed by the same
        # logmap0 -> ReLU / MaxPool2d -> expmap0 -> project pattern and an
        # optional batch norm) are currently commented out.

        # final pool
        x = pmath.logmap0(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())
        x = nn.MaxPool2d(5)(x)
        x = pmath.expmap0(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())
        x = pmath.project(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())

        # x is currently N x 512 x 1 x 1

        # flattening below may distort the per-channel geometry; as a
        # workaround, view the result as a Euclidean vector and expmap it
        # back onto the ball
        x = x.view(x.size(0), -1)
        x = pmath.expmap0(x, c=c)
        x = pmath.project(x, c=c)
        return x
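# --- Hedged refactoring sketch (hypothetical helper, not in the original) ---
# The forward above repeats one motif: flatten each channel, logmap0 to the
# tangent space, apply a Euclidean op (ReLU, MaxPool2d, ...), expmap0 back,
# and project. A small helper capturing that motif; its name and the pmath
# import path are assumptions.
import hyptorch.pmath as pmath  # assumed import path

def apply_in_tangent_space(x, op, c):
    """Apply a Euclidean op to an N x C x H x W tensor of per-channel ball points."""
    def flat(t):
        return t.view(t.size(0) * t.size(1), -1)
    x = pmath.logmap0(flat(x), c=c).view(x.size())
    x = op(x)                                        # e.g. nn.ReLU(), nn.MaxPool2d(2)
    x = pmath.expmap0(flat(x), c=c).view(x.size())
    return pmath.project(flat(x), c=c).view(x.size())

# usage sketch (hypothetical): x = apply_in_tangent_space(x, nn.MaxPool2d(2), c)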
    def forward(self, x, c=None):
        if c is None:
            c = self.c

        # BLOCK 1

        x = self.c1(x)
        # batch norm
        #         x = pmath.logmap0(x, c=c)
        #         x = self.b1(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        # blocked relu and maxpool 2
        #         x = pmath.logmap0(x, c=c)
        #         x = nn.ReLU()(x)
        #         x = nn.MaxPool2d(2)(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        # separate relu and maxpool 2
        #         x = pmath.logmap0(x, c=c)
        x = nn.ReLU()(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        #         x = pmath.logmap0(x, c=c)
        x = nn.MaxPool2d(2)(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        # BLOCK 2

        x = self.c2(x)
        # batch norm
        #         x = pmath.logmap0(x, c=c)
        #         x = self.b2(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        # blocked relu and maxpool 2
        #         x = pmath.logmap0(x, c=c)
        #         x = nn.ReLU()(x)
        #         x = nn.MaxPool2d(2)(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        # separate relu and maxpool 2
        #         x = pmath.logmap0(x, c=c)
        x = nn.ReLU()(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        #         x = pmath.logmap0(x, c=c)
        x = nn.MaxPool2d(2)(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        # BLOCK 3

        x = self.c3(x)
        # batch norm
        #         x = pmath.logmap0(x, c=c)
        #         x = self.b3(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        # blocked relu and maxpool 2
        #         x = pmath.logmap0(x, c=c)
        #         x = nn.ReLU()(x)
        #         x = nn.MaxPool2d(2)(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        # separate relu and maxpool 2
        #         x = pmath.logmap0(x, c=c)
        x = nn.ReLU()(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        #         x = pmath.logmap0(x, c=c)
        x = nn.MaxPool2d(2)(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        # BLOCK 4, to hyperbolic

        x = self.e2p(x)

        x = self.c4(x, c=c)
        # batch norm
        #         x = pmath.logmap0(x, c=c)
        #         x = self.b4(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        # blocked relu and maxpool 2
        #         x = pmath.logmap0(x, c=c)
        #         x = nn.ReLU()(x)
        #         x = nn.MaxPool2d(2)(x)
        #         x = pmath.expmap0(x, c=c)
        #         x = pmath.project(x, c=c)

        # separate relu and maxpool 2
        x = pmath.logmap0(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())
        x = nn.ReLU()(x)
        x = pmath.expmap0(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())
        x = pmath.project(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())

        x = pmath.logmap0(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())
        x = nn.MaxPool2d(2)(x)
        x = pmath.expmap0(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())
        x = pmath.project(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())

        # final pool
        x = pmath.logmap0(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())
        x = nn.MaxPool2d(5)(x)
        x = pmath.expmap0(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())
        x = pmath.project(x.view(x.size(0) * x.size(1), -1),
                          c=c).view(x.size())

        # x is currently N x 512 x 1 x 1

        # flattening below may distort the per-channel geometry; as a
        # workaround, view the result as a Euclidean vector and expmap it
        # back onto the ball
        x = x.view(x.size(0), -1)
        x = pmath.expmap0(x, c=c)
        x = pmath.project(x, c=c)
        return x
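# --- Hedged sketch of the Euclidean-to-Poincare transition (standalone) ---
# In the hybrid forward above, self.e2p moves ordinary conv activations onto
# the ball before the hyperbolic block. This mirrors the expmap0 branch of
# the first snippet applied channel-wise; it is not necessarily the exact
# e2p module, and the helper name and pmath import path are assumptions.
import torch
import hyptorch.pmath as pmath  # assumed import path

def to_poincare_channelwise(x, c):
    flat = x.view(x.size(0) * x.size(1), -1)
    return pmath.project(pmath.expmap0(flat, c=c), c=c).view(x.size())

feats = torch.randn(2, 64, 10, 10) * 0.1       # Euclidean conv activations
feats_ball = to_poincare_channelwise(feats, c=1.0)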