Ejemplo n.º 1
0
    def evaluate_scalar(self, q_point=None, p_point=None):
        """Evaluate the softabs-metric Hamiltonian scalar tau + 0.5*log|G|.

        q_point: optional point loaded into the linked V object before evaluating.
        p_point: optional momentum point loaded into this object before evaluating.
        Returns 0.5 * p^T G^{-1} p + 0.5 * sum(log(softabs(lam))), where G is the
        softabs-regularized Hessian metric.
        """
        # `x is not None` is the idiomatic form (was `not x is None`).
        if q_point is not None:
            self.linkedV.load_point(q_point)
        if p_point is not None:
            self.load_point(p_point)
        _, H_ = self.linkedV.getH_tensor()

        lam, Q = eigen(H_)

        # Softabs-regularized eigenvalues: metric G = Q diag(softabs(lam)) Q^T.
        temp = softabs_map(lam, self.metric.msoftabsalpha)

        # Inverse metric G^{-1} = Q diag(1/softabs(lam)) Q^T.
        inv_exp_H = torch.mm(torch.mm(Q, torch.diag(1 / temp)), torch.t(Q))
        o = 0.5 * torch.dot(self.flattened_tensor,
                            torch.mv(inv_exp_H, self.flattened_tensor))
        temp2 = 0.5 * torch.log(temp).sum()

        return o + temp2
Ejemplo n.º 2
0
 def dtaudp(self, p_flattened_tensor, lam=None, Q=None):
     """Gradient of the kinetic energy w.r.t. momentum: G^{-1} p.

     p_flattened_tensor: flattened momentum vector.
     lam, Q: optional eigendecomposition of the Hessian. Previously the None
     defaults crashed immediately (the recomputation branch was commented out);
     they are now recomputed from the linked V object when not supplied.
     Returns Q diag(1/softabs(lam)) Q^T p.
     """
     if lam is None or Q is None:
         # Recompute the eigendecomposition when the caller did not cache one.
         _, H_ = self.linkedV.getH_tensor()
         lam, Q = eigen(H_)
     return Q.mv(
         torch.diag(1 / softabs_map(lam, self.metric.msoftabsalpha)).mv(
             torch.t(Q).mv(p_flattened_tensor)))
 def T_givenq(p):
     """Kinetic energy of momentum p under the softabs metric at the global q."""
     _, hess = getH(q, V)
     eigvals, eigvecs = eigen(hess.data)
     soft = softabs_map(eigvals, alpha)
     # Inverse metric: Q diag(1/softabs(lam)) Q^T
     metric_inv = eigvecs.mm(torch.diag(1 / soft)).mm(eigvecs.t())
     quad = 0.5 * torch.dot(p.data, metric_inv.mv(p.data))
     logdet = 0.5 * torch.log(soft).sum()
     return quad + logdet
Ejemplo n.º 4
0
 def T_givenq(p):
     """Kinetic energy (tau + log-det term) under the softabs metric, with debug output."""
     _, hess = getH(q, V)
     eigvals, eigvecs = eigen(hess.data)
     soft = softabs_map(eigvals, alpha)
     # G^{-1} = Q diag(1/softabs(lam)) Q^T
     metric_inv = eigvecs.mm(torch.diag(1 / soft)).mm(eigvecs.t())
     tau = 0.5 * torch.dot(p.data, metric_inv.mv(p.data))
     logdet = 0.5 * torch.log(soft).sum()
     print("explicit tau {}".format(tau))
     print("explicit logdetmetric {}".format(logdet))
     return tau + logdet
Ejemplo n.º 5
0
    def generate_momentum(self, q):
        """Sample a momentum point ~ N(0, G(q)) for the softabs metric.

        Uses p = Q diag(sqrt(softabs(lam))) z with z ~ N(0, I), where (lam, Q)
        is the eigendecomposition of the Hessian at q.
        """
        _, hessian = self.linkedV.getH_tensor(q)
        eigvals, eigvecs = eigen(hessian)
        scale = torch.sqrt(softabs_map(eigvals, self.metric.msoftabsalpha))
        sqrt_metric = eigvecs.mm(torch.diag(scale))
        noise = torch.randn(len(eigvals))
        momentum = point(None, self)
        momentum.flattened_tensor.copy_(sqrt_metric.mv(noise))
        momentum.load_flatten()
        return momentum
Ejemplo n.º 6
0
def generate_momentum(q):
    """Sample a flat momentum vector ~ N(0, G(q)) under the softabs metric."""
    _, hessian = getH(q, V)
    eigvals, eigvecs = eigen(hessian.data)
    # Square root of the metric: Q diag(sqrt(softabs(lam)))
    sqrt_metric = eigvecs.mm(torch.diag(torch.sqrt(softabs_map(eigvals, alpha))))
    return sqrt_metric.mv(torch.randn(len(eigvals)))
Ejemplo n.º 7
0
    def generate_momentum(self, q):
        """Draw momentum ~ N(0, G(q)) and return it wrapped in a point object."""
        _, hessian = self.linkedV.getH_tensor(q)
        eigvals, eigvecs = eigen(hessian)
        # p = Q diag(sqrt(softabs(lam))) z, z ~ N(0, I)
        scale = torch.diag(torch.sqrt(softabs_map(eigvals, self.metric.msoftabsalpha)))
        sample = torch.mm(eigvecs, scale).mv(torch.randn(len(eigvals)))
        result = point(list_tensor=self.list_tensor, pointtype="p", need_flatten=self.need_flatten)
        result.flattened_tensor.copy_(sample)
        result.load_flatten()
        return result
Ejemplo n.º 8
0
    def evaluate_scalar(self):
        """Return tau + 0.5*log|G| under the softabs metric (with debug output)."""
        _, hessian = self.linkedV.getH_tensor()
        eigvals, eigvecs = eigen(hessian)
        soft = softabs_map(eigvals, self.metric.msoftabsalpha)
        # Inverse metric G^{-1} = Q diag(1/softabs(lam)) Q^T
        metric_inv = eigvecs.mm(torch.diag(1 / soft)).mm(eigvecs.t())
        tau = 0.5 * torch.dot(self.flattened_tensor,
                              metric_inv.mv(self.flattened_tensor))
        logdet = 0.5 * torch.log(soft).sum()
        print("tau {}".format(tau))
        print("logdetmetric {}".format(logdet))
        return tau + logdet
Ejemplo n.º 9
0
 def generate(q):
     """Sample a momentum vector ~ N(0, G(q)) via the softabs eigenvalue map."""
     eigvals, eigvecs = eigen(getH(q, V).data)
     half_metric = torch.mm(eigvecs, torch.diag(torch.sqrt(softabs_map(eigvals, alpha))))
     return half_metric.mv(torch.randn(len(eigvals)))