def generalized_leapfrog_softabs_op(q,p,epsilon,Ham,delta=0.1):
    # input output point object
    # can take anything but should output tensor
    stat = gleapfrog_stat()
    dV,H_,dH = Ham.V.getdH_tensor(q)
    lam, Q = eigen(H_)
    # dphidq outputs and inputs takes flattened gradient in flattened form
    p.flattened_tensor -= epsilon * 0.5 * Ham.T.dphidq(lam,dH,Q,dV)
    p.load_flatten()
    rho = p.flattened_tensor.clone()
    pprime = p.flattened_tensor.clone()
    deltap = delta + 0.5
    count = 0
    while (deltap > delta) and (count < 5):
        # dtaudq returns gradient in flattened form
        pprime = rho - epsilon * 0.5 * Ham.T.dtaudq(p.flattened_tensor,dH,Q,lam)
        deltap = torch.max(torch.abs(p.flattened_tensor-pprime))
        p.flattened_tensor.copy_(pprime)
        p.load_flatten()
        count = count + 1

    sigma = q.point_clone()
    qprime = q.flattened_tensor.clone()
    deltaq = delta + 0.5

    _,H_ = Ham.V.getH_tensor(sigma)
    olam,oQ = eigen(H_)
    count = 0
    while (deltaq > delta) and (count < 5):
        _,H_ = Ham.V.getH_tensor(q)
        lam,Q = eigen(H_)
        qprime = sigma.flattened_tensor + 0.5 * epsilon * Ham.T.dtaudp(p.flattened_tensor,olam,oQ) + \
                 0.5 * epsilon* Ham.T.dtaudp(p.flattened_tensor,lam,Q)
        deltaq = torch.max(torch.abs(q.flattened_tensor-qprime))
        q.flattened_tensor.copy_(qprime)
        q.load_flatten()
        count = count + 1

    if deltaq>delta:
        stat.second_divergent = True
        stat.divergent =True
        #print("second fi div")
        return(q,p,stat)
    else:
        stat.second_divergent = False
        q.flattened_tensor.copy_(qprime)
        q.load_flatten()
    dV,H_,dH = Ham.V.getdH_tensor(q)
    lam,Q = eigen(H_)

    p.flattened_tensor -= 0.5 * epsilon * Ham.T.dtaudq(p.flattened_tensor,dH,Q,lam)
    p.load_flatten()
    p.flattened_tensor -=0.5 * epsilon * Ham.T.dphidq(lam,dH,Q,dV)
    p.load_flatten()

    return(q,p,stat)
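The integrator above relies on two helpers that this snippet does not show: eigen, which diagonalizes the symmetric Hessian of the potential, and softabs_map, which maps its eigenvalues through the SoftAbs transform. A minimal sketch of what they are assumed to compute (the repository's versions may differ; torch.linalg.eigh is used here only as a stand-in):

import torch

def eigen(H):
    # Symmetric eigendecomposition: H = Q diag(lam) Q^T.
    lam, Q = torch.linalg.eigh(H)
    return lam, Q

def softabs_map(lam, alpha):
    # SoftAbs map lam / tanh(alpha * lam) = lam * coth(alpha * lam): a smooth,
    # strictly positive surrogate for |lam|. Its limit as lam -> 0 is 1/alpha;
    # production code should guard that case to avoid 0/0.
    return lam / torch.tanh(alpha * lam)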
Example No. 2
    def evaluate_scalar(self,q_point=None,p_point=None):
        if q_point is not None:
            self.linkedV.load_point(q_point)
        if p_point is not None:
            self.load_point(p_point)
        _, H_ = self.linkedV.getH_tensor()
        #debug_dict.update({"abstract":_.clone()})

        lam, Q = eigen(H_)

        temp = softabs_map(lam, self.metric.msoftabsalpha)

        inv_exp_H = torch.mm(torch.mm(Q, torch.diag(1/temp)), torch.t(Q))
        #print("abstract p {}".format(self.flattened_tensor))
        #print("inv_exp_H {}".format(inv_exp_H))
        o = 0.5 * torch.dot(self.flattened_tensor, torch.mv(inv_exp_H, self.flattened_tensor))
        temp2 = 0.5 * torch.log((temp)).sum()
        #print("alpha {}".format(self.metric.msoftabsalpha))
        #print("lam {}".format(lam))
        #print("H_ {}".format(H_))
        #print("H_2 {}".format(torch.mm(torch.mm(Q, torch.diag(temp)), torch.t(Q))))
        #print("msoftabslambda {}".format(temp))
        #print("abstract tau {}".format(o))
        #print("abstract logdetmetric {}".format(temp2))

        output = o + temp2
        return (output)
Example No. 3
def T_givenq(p):
    _, H_ = getH(q, V)
    lam, Q = eigen(H_.data)
    temp = softabs_map(lam, alpha)
    inv_exp_H = torch.mm(torch.mm(Q, torch.diag(1 / temp)), torch.t(Q))
    o = 0.5 * torch.dot(p.data, torch.mv(inv_exp_H, p.data))
    temp2 = 0.5 * torch.log(temp).sum()
    return (o + temp2)
Example No. 4
def T_givenq(p):
    _, H_ = getH(q, V)
    #debug_dict.update({"explicit":_.data.clone()})
    out = eigen(H_.data)
    lam = out[0]
    Q = out[1]
    temp = softabs_map(lam, alpha)
    #print("explicit p {}".format(q.data))
    inv_exp_H = torch.mm(torch.mm(Q, torch.diag(1 / temp)), torch.t(Q))
    o = 0.5 * torch.dot(p.data, torch.mv(inv_exp_H, p.data))
    temp2 = 0.5 * torch.log(temp).sum()
    print("explicit tau {}".format(o))
    print("explicit logdetmetric {}".format(temp2))
    return (o + temp2)
Example No. 5
    def generate_momentum(self, q):

        #if lam == None or Q == None:
        #    H_ = self.linkedV.getH_tensor()
        #    lam, Q = eigen(H_)
        _, H_ = self.linkedV.getH_tensor(q)
        lam, Q = eigen(H_)
        temp = torch.mm(
            Q,
            torch.diag(torch.sqrt(softabs_map(lam,
                                              self.metric.msoftabsalpha))))
        out = point(None, self)
        out.flattened_tensor.copy_(torch.mv(temp, torch.randn(len(lam))))
        out.load_flatten()
        return (out)
Example No. 6
def generate_momentum(q):
    # if lam == None or Q == None:
    #    H_ = self.linkedV.getH_tensor()
    #    lam, Q = eigen(H_)
    _, H_ = getH(q, V)
    lam, Q = eigen(H_.data)
    # print(lam)
    # print(Q)
    #exit()
    # print(lam.shape)
    # print(type(lam))
    # print(type(lam[0]))
    # exit()
    #print(lam)
    #exit()
    temp = torch.mm(Q, torch.diag(torch.sqrt(softabs_map(lam, alpha))))
    out = temp.mv(torch.randn(len(lam)))
    # print(temp)
    # exit()
    return (out)
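generate_momentum above draws p = Q diag(sqrt(softabs(lam))) z with z ~ N(0, I), which is a sample from N(0, G(q)) for the SoftAbs metric G(q) = Q diag(softabs(lam)) Q^T. A small self-contained check of that identity, substituting torch.linalg.eigh and an explicit SoftAbs for the repo helpers (both are assumptions):

import torch

alpha = 1e6                                  # hypothetical SoftAbs sharpness
A = torch.randn(4, 4)
H = 0.5 * (A + A.t())                        # stand-in for the Hessian of V
lam, Q = torch.linalg.eigh(H)
soft = lam / torch.tanh(alpha * lam)         # SoftAbs eigenvalues
factor = Q @ torch.diag(torch.sqrt(soft))    # matrix used to colour the noise
G = Q @ torch.diag(soft) @ Q.t()             # SoftAbs metric
# Cov[factor @ z] = factor factor^T = G, so momenta drawn this way have covariance G.
assert torch.allclose(factor @ factor.t(), G, atol=1e-4)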
Example No. 7
    def generate_momentum(self,q):

        #if lam == None or Q == None:
        #    H_ = self.linkedV.getH_tensor()
        #    lam, Q = eigen(H_)
        _, H_ = self.linkedV.getH_tensor(q)
        lam, Q = eigen(H_)
        #print(lam)
        #print(Q)

        #print(lam.shape)
        #print(type(lam))
        #print(type(lam[0]))
        #exit()
        temp = torch.mm(Q, torch.diag(torch.sqrt(softabs_map(lam, self.metric.msoftabsalpha))))
        #print(temp)
        #exit()
        out = point(list_tensor=self.list_tensor,pointtype="p",need_flatten=self.need_flatten)
        out.flattened_tensor.copy_(torch.mv(temp, torch.randn(len(lam))))
        out.load_flatten()
        return(out)
Example No. 8
    def evaluate_scalar(self):
        _, H_ = self.linkedV.getH_tensor()
        lam, Q = eigen(H_)

        temp = softabs_map(lam, self.metric.msoftabsalpha)

        inv_exp_H = torch.mm(torch.mm(Q, torch.diag(1 / temp)), torch.t(Q))

        #print("inv_exp_H {}".format(inv_exp_H))
        o = 0.5 * torch.dot(self.flattened_tensor,
                            torch.mv(inv_exp_H, self.flattened_tensor))
        temp2 = 0.5 * torch.log((temp)).sum()
        #print("alpha {}".format(self.metric.msoftabsalpha))
        #print("lam {}".format(lam))
        #print("H_ {}".format(H_))
        #print("H_2 {}".format(torch.mm(torch.mm(Q, torch.diag(temp)), torch.t(Q))))
        #print("msoftabslambda {}".format(temp))
        print("tau {}".format(o))
        print("logdetmetric {}".format(temp2))

        output = o + temp2
        return (output)
Example No. 9
def T(q, alpha):
    def T_givenq(p):
        _, H_ = getH(q, V)
        lam, Q = eigen(H_.data)
        temp = softabs_map(lam, alpha)
        inv_exp_H = torch.mm(torch.mm(Q, torch.diag(1 / temp)), torch.t(Q))
        o = 0.5 * torch.dot(p.data, torch.mv(inv_exp_H, p.data))
        temp2 = 0.5 * torch.log(temp).sum()
        return (o + temp2)
    return (T_givenq)

def H(q,p,alpha):
    # returns float
    return(V(q).data[0] + T(q,alpha)(p))
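For reference, the scalar assembled by H above is the SoftAbs RMHMC Hamiltonian; the code drops the momentum density's additive constant (d/2) log 2π, which affects neither the dynamics nor the accept ratio:

$$
H(q,p) \;=\; V(q) \;+\; \tfrac{1}{2}\, p^{\top} G(q)^{-1} p \;+\; \tfrac{1}{2}\, \log\det G(q),
\qquad
G(q) \;=\; Q \,\operatorname{diag}\!\big(\lambda_i \coth(\alpha \lambda_i)\big)\, Q^{\top},
$$

where Q and the λ_i come from the eigendecomposition of the Hessian of V at q.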



store = torch.zeros((chain_l,dim))

g,H_ = getH(q,V)
lam,Q = eigen(H_.data)

begin = time.time()
for i in range(chain_l):
    print("round {}".format(i))
    out = rmhmc_step(q,H,0.1,10,alp,0.1,V)
    store[i,]=out[0].data
    q.data = out[0].data
totalt = time.time() - begin

store = store[burn_in:,]
store = store.numpy()
empCov = np.cov(store,rowvar=False)
emmean = np.mean(store,axis=0)
print("length of chain is {}".format(chain_l))
print("burn in is {}".format(burn_in))
Example No. 10
def generalized_leapfrog(q, p, epsilon, Ham, delta=1e-8, debug_dict=None):
    # input output point object
    # can take anything but should output tensor
    #print("first q abstract {}".format(q.flattened_tensor))
    #print("first p abstract {}".format(p.flattened_tensor))
    q_dummy = q.point_clone()
    p_dummy = p.point_clone()
    stat = gleapfrog_stat()
    dV, H_, dH = Ham.V.getdH_tensor(q_dummy)
    #print("dH abstract {}".format(dH))

    lam, Q = eigen(H_)

    #print("second q abstract {}".format(q.flattened_tensor))
    #print("second p abstract {}".format(p.flattened_tensor))
    # dphidq outputs and inputs takes flattened gradient in flattened form
    p_dummy.flattened_tensor -= epsilon * 0.5 * Ham.T.dphidq(
        lam=lam, dH=dH, Q=Q, dV=dV)

    #print("third q abstract {}".format(q.flattened_tensor))
    #print("third p abstract {}".format(p.flattened_tensor))
    #p.load_flatten()
    rho = p_dummy.flattened_tensor.clone()
    pprime = p_dummy.flattened_tensor.clone()
    deltap = delta + 0.5
    count = 0

    while (deltap > delta) and (count < 10):
        # dtaudq returns gradient in flattened form
        pprime.copy_(rho - epsilon * 0.5 * Ham.T.dtaudq(
            p_flattened_tensor=p_dummy.flattened_tensor, dH=dH, Q=Q, lam=lam))
        deltap = torch.max(torch.abs(p_dummy.flattened_tensor - pprime))
        p_dummy.flattened_tensor.copy_(pprime)
        p_dummy.load_flatten()
        count = count + 1
    if deltap > delta:
        if deltap > 50:
            stat.divergent = True
        #stat.divergent = True
        stat.first_divergent = True

        #print("pprime {}".format(pprime))
        #print("deltap {}".format(deltap))
        #print("first fi div")
        #print(count)
        #return (q, p, stat)
        return (q_dummy, p_dummy, stat)
    else:
        stat.first_divergent = False

    #print(first_fi_divergent)
    #print(p.flattened_tensor)
    sigma = q_dummy.point_clone()
    qprime = q_dummy.flattened_tensor.clone()
    deltaq = delta + 0.5

    _, H_ = Ham.V.getH_tensor(sigma)

    olam, oQ = eigen(H_)

    count = 0

    while (deltaq > delta) and (count < 10):
        _, H_ = Ham.V.getH_tensor(q_dummy)
        lam, Q = eigen(H_)
        qprime.copy_(sigma.flattened_tensor + 0.5 * epsilon * Ham.T.dtaudp(p_dummy.flattened_tensor,olam,oQ) + \
                 0.5 * epsilon* Ham.T.dtaudp(p_dummy.flattened_tensor,lam,Q))
        deltaq = torch.max(torch.abs(q_dummy.flattened_tensor - qprime))
        q_dummy.flattened_tensor.copy_(qprime)
        q_dummy.load_flatten()
        count = count + 1
    if deltaq > delta:
        stat.second_divergent = True
        if deltaq > 50:
            stat.divergent = True
        #print("deltaq {}".format(deltaq))
        #print("second fi div")
        #assert stat.divergent
        #return(q,p,stat)
        return (q_dummy, p_dummy, stat)
    else:
        stat.second_divergent = False
    #print("H is {}".format(Ham.evaluate(q,p)))

    dV, H_, dH = Ham.V.getdH_tensor(q_dummy)
    #print(q.flattened_tensor)
    lam, Q = eigen(H_)
    #print(0.5 * epsilon * Ham.T.dtaudq(p.flattened_tensor,dH,Q,lam))

    p_dummy.flattened_tensor -= 0.5 * epsilon * Ham.T.dtaudq(
        p_dummy.flattened_tensor, dH, Q, lam)
    #print("H is {}".format(Ham.evaluate(q, p)))
    #p.load_flatten()

    p_dummy.flattened_tensor -= 0.5 * epsilon * Ham.T.dphidq(lam, dH, Q, dV)

    #p.load_flatten()
    #print("yes")
    #debug_dict.update({"abstract": p.flattened_tensor.clone()})
    #return(q,p,stat)
    return (q_dummy, p_dummy, stat)
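Both implicit half-steps above are solved with the same fixed-point scheme: iterate x <- F(x) until successive iterates agree to within delta, and flag a divergence otherwise. A generic sketch of that pattern (a hypothetical helper, not part of the repository):

import torch

def fixed_point(update, x0, delta=1e-8, max_iter=10):
    # Iterate x <- update(x); stop once the sup-norm change drops below delta.
    # Returns the final iterate and whether the tolerance was reached.
    x = x0.clone()
    for _ in range(max_iter):
        x_new = update(x)
        if torch.max(torch.abs(x - x_new)) <= delta:
            return x_new, True
        x = x_new
    return x, False

The momentum half-step, for instance, corresponds to update = lambda p_flat: rho - 0.5 * epsilon * Ham.T.dtaudq(p_flattened_tensor=p_flat, dH=dH, Q=Q, lam=lam).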


# def generalized_leapfrog_softabsdiag(q,p,epsilon,Ham,delta=0.1):
#     # input output point object
#     # can take anything but should output tensor
#     stat = gleapfrog_stat()
#     dV,mdiagH,mgraddiagH = Ham.V.get_graddiagH(q)
#     #print(dV.shape)
#     #exit()
#     mlambda,_ = Ham.T.fcomputeMetric(mdiagH)
#     # dphidq outputs and inputs takes flattened gradient in flattened form
#     p.flattened_tensor -= epsilon * 0.5 * Ham.T.dphidq(dV=dV,mdiagH=mdiagH,mgraddiagH=mgraddiagH,mlambda=mlambda)
#     p.load_flatten()
#     rho = p.flattened_tensor.clone()
#     pprime = p.flattened_tensor.clone()
#     deltap = delta + 0.5
#     count = 0
#     while (deltap > delta) and (count < 5):
#         # dtaudq returns gradient in flattened form
#         pprime = rho - epsilon * 0.5 * Ham.T.dtaudq(p.flattened_tensor,mdiagH,mlambda,mgraddiagH)
#         deltap = torch.max(torch.abs(p.flattened_tensor-pprime))
#         p.flattened_tensor.copy_(pprime)
#         p.load_flatten()
#         count = count + 1
#     if deltap>delta:
#         stat.divergent = True
#         stat.first_divergent = True
#         #print("pprime {}".format(pprime))
#         #print("deltap {}".format(deltap))
#         #print("first fi div")
#         #print(count)
#         return (q, p, stat)
#     else:
#         stat.first_divergent = False
#         p.flattened_tensor.copy_(pprime)
#         p.load_flatten()
#
#     sigma = q.point_clone()
#     qprime = q.flattened_tensor.clone()
#     deltaq = delta + 0.5
#
#     _,mdiagH = Ham.V.getdiagH_tensor(sigma)
#     omlambda,_ = Ham.T.fcomputeMetric(mdiagH)
#     count = 0
#     while (deltaq > delta) and (count < 5):
#         _,mdiagH = Ham.V.getdiagH_tensor(q)
#         mlambda,_ = Ham.T.fcomputeMetric(mdiagH)
#         qprime = sigma.flattened_tensor + 0.5 * epsilon * Ham.T.dtaudp(p.flattened_tensor,omlambda) + \
#                  0.5 * epsilon* Ham.T.dtaudp(p.flattened_tensor,mlambda)
#         deltaq = torch.max(torch.abs(q.flattened_tensor-qprime))
#         q.flattened_tensor.copy_(qprime)
#         q.load_flatten()
#         count = count + 1
#     if deltaq>delta:
#         stat.second_divergent = True
#         stat.divergent =True
#         #print("second fi div")
#         return(q,p,stat)
#     else:
#         stat.second_divergent = False
#         q.flattened_tensor.copy_(qprime)
#         q.load_flatten()
#     #print("H is {}".format(Ham.evaluate(q,p)))
#
#     dV, mdiagH, mgraddiagH = Ham.V.get_graddiagH(q)
#     mlambda, _ = Ham.T.fcomputeMetric(mdiagH)
#
#     p.flattened_tensor -= 0.5 * epsilon * Ham.T.dtaudq(p.flattened_tensor,mdiagH,mlambda,mgraddiagH)
#     p.load_flatten()
#     p.flattened_tensor -=0.5 * epsilon * Ham.T.dphidq(dV,mdiagH,mgraddiagH,mlambda)
#     p.load_flatten()
#
#     return(q,p,stat)
#
# def generalized_leapfrog_softabs_op(q,p,epsilon,Ham,delta=0.1):
#     # input output point object
#     # can take anything but should output tensor
#     stat = gleapfrog_stat()
#     dV,H_,dH = Ham.V.getdH_tensor(q)
#     lam, Q = eigen(H_)
#     # dphidq outputs and inputs takes flattened gradient in flattened form
#     p.flattened_tensor -= epsilon * 0.5 * Ham.T.dphidq(lam,dH,Q,dV)
#     p.load_flatten()
#     rho = p.flattened_tensor.clone()
#     pprime = p.flattened_tensor.clone()
#     deltap = delta + 0.5
#     count = 0
#     while (deltap > delta) and (count < 5):
#         # dtaudq returns gradient in flattened form
#         pprime = rho - epsilon * 0.5 * Ham.T.dtaudq(p.flattened_tensor,dH,Q,lam)
#         deltap = torch.max(torch.abs(p.flattened_tensor-pprime))
#         p.flattened_tensor.copy_(pprime)
#         p.load_flatten()
#         count = count + 1
#
#     sigma = q.point_clone()
#     qprime = q.flattened_tensor.clone()
#     deltaq = delta + 0.5
#
#     _,H_ = Ham.V.getH_tensor(sigma)
#     olam,oQ = eigen(H_)
#     count = 0
#     while (deltaq > delta) and (count < 5):
#         _,H_ = Ham.V.getH_tensor(q)
#         lam,Q = eigen(H_)
#         qprime = sigma.flattened_tensor + 0.5 * epsilon * Ham.T.dtaudp(p.flattened_tensor,olam,oQ) + \
#                  0.5 * epsilon* Ham.T.dtaudp(p.flattened_tensor,lam,Q)
#         deltaq = torch.max(torch.abs(q.flattened_tensor-qprime))
#         q.flattened_tensor.copy_(qprime)
#         q.load_flatten()
#         count = count + 1
#
#     if deltaq>delta:
#         stat.second_divergent = True
#         stat.divergent =True
#         #print("second fi div")
#         return(q,p,stat)
#     else:
#         stat.second_divergent = False
#         q.flattened_tensor.copy_(qprime)
#         q.load_flatten()
#     dV,H_,dH = Ham.V.getdH_tensor(q)
#     lam,Q = eigen(H_)
#
#     p.flattened_tensor -= 0.5 * epsilon * Ham.T.dtaudq(p.flattened_tensor,dH,Q,lam)
#     p.load_flatten()
#     p.flattened_tensor -=0.5 * epsilon * Ham.T.dphidq(lam,dH,Q,dV)
#     p.load_flatten()
#
#     return(q,p,stat)
#
# def generalized_leapfrog_softabs_op_diag(q,p,epsilon,Ham,delta=0.1):
#     # input output point object
#     # can take anything but should output tensor
#     stat = gleapfrog_stat()
#     dV,H_,dH = Ham.V.getdH_tensor(q)
#     lam, Q = eigen(H_)
#     # dphidq outputs and inputs takes flattened gradient in flattened form
#     p.flattened_tensor -= epsilon * 0.5 * Ham.T.dphidq(lam,dH,Q,dV)
#     p.load_flatten()
#     rho = p.flattened_tensor.clone()
#     pprime = p.flattened_tensor.clone()
#     deltap = delta + 0.5
#     count = 0
#     while (deltap > delta) and (count < 10):
#         # dtaudq returns gradient in flattened form
#         pprime = rho - epsilon * 0.5 * Ham.T.dtaudq(p.flattened_tensor,dH,Q,lam)
#         deltap = torch.max(torch.abs(p.flattened_tensor-pprime))
#         p.flattened_tensor.copy_(pprime)
#         p.load_flatten()
#         count = count + 1
#     if deltap>delta:
#         stat.divergent = True
#         stat.first_divergent = True
#         #print("pprime {}".format(pprime))
#         #print("deltap {}".format(deltap))
#         #print("first fi div")
#         #print(count)
#         return (q, p, stat)
#     else:
#         stat.first_divergent = False
#         p.flattened_tensor.copy_(pprime)
#         p.load_flatten()
#
#     sigma = q.point_clone()
#     qprime = q.flattened_tensor.clone()
#     deltaq = delta + 0.5
#
#     _,H_ = Ham.V.getH_tensor(sigma)
#     olam,oQ = eigen(H_)
#     count = 0
#     while (deltaq > delta) and (count < 10):
#         _,H_ = Ham.V.getH_tensor(q)
#         lam,Q = eigen(H_)
#         qprime = sigma.flattened_tensor + 0.5 * epsilon * Ham.T.dtaudp(p.flattened_tensor,olam,oQ) + \
#                  0.5 * epsilon* Ham.T.dtaudp(p.flattened_tensor,lam,Q)
#         deltaq = torch.max(torch.abs(q.flattened_tensor-qprime))
#         q.flattened_tensor.copy_(qprime)
#         q.load_flatten()
#         count = count + 1
#
#     if deltaq>delta:
#         stat.second_divergent = True
#         stat.divergent =True
#         #print("second fi div")
#         return(q,p,stat)
#     else:
#         stat.second_divergent = False
#         q.flattened_tensor.copy_(qprime)
#         q.load_flatten()
#     dV,H_,dH = Ham.V.getdH_tensor(q)
#     lam,Q = eigen(H_)
#
#     p.flattened_tensor -= 0.5 * epsilon * Ham.T.dtaudq(p.flattened_tensor,dH,Q,lam)
#     p.load_flatten()
#     p.flattened_tensor -=0.5 * epsilon * Ham.T.dphidq(lam,dH,Q,dV)
#     p.load_flatten()
#
#     return(q,p)
# def rmhmc_step(init_q,epsilon,L,Ham,evolve_t=None,careful=True):
#
#
#     Ham.diagnostics = time_diagnositcs()
#     q = init_q.point_clone()
#
#     init_p = Ham.T.generate_momentum(q)
#     p = init_p.point_clone()
#     current_H = Ham.evaluate(q,p)
#     num_transitions = L
#     divergent = False
#
#     for i in range(L):
#         out = Ham.integrator(q,p,epsilon,Ham)
#         q = out[0]
#         p = out[1]
#         if careful:
#             temp_H = Ham.evaluate(q, p)
#             if (abs(temp_H - current_H) > 1000):
#                 return_q = init_q
#                 return_p = None
#                 return_H = current_H
#                 accept_rate = 0
#                 accepted = False
#                 divergent = True
#                 num_transitions = i
#                 break
#     if not divergent:
#         proposed_H = Ham.evaluate(q,p)
#         u = numpy.random.rand(1)
#
#         if (abs(current_H - proposed_H) > 1000):
#             divergent = True
#         else:
#             divergent = False
#     accept_rate = math.exp(min(0,current_H - proposed_H))
#     if u < accept_rate:
#         next_q = q
#         proposed_p = p
#         next_H = proposed_H
#         accepted = True
#     else:
#         next_q = init_q
#         proposed_p = None
#         accepted = False
#         next_H = current_H
#     return(next_q,proposed_p,init_p,next_H,accepted,accept_rate,divergent,num_transitions)
Example No. 11
def p_sharp(q, p):
    _, H = getH(q, V)
    lam, Q = eigen(H.data)
    p_s = dtaudp(p.data, alp, lam, Q)
    return (p_s)
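dtaudp is assumed to return the gradient of the SoftAbs kinetic energy with respect to the momentum, so p_sharp is the velocity associated with p under the metric G(q):

$$
p^{\sharp} \;=\; \frac{\partial \tau}{\partial p} \;=\; G(q)^{-1} p,
\qquad
\tau(q,p) \;=\; \tfrac{1}{2}\, p^{\top} G(q)^{-1} p .
$$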
Example No. 12
from explicit.genleapfrog_ult_util import eigen
import torch, numpy
inp = torch.eye(5)
input = numpy.array([[0.0, 1], [-1, 0]])
#print(input)

#a = torch.from_numpy(input)
#print(a)
#lam,Q = a.eig(eigenvectors=True)
#print(lam)
#exit()
#lam[:,1]==0
#exit()
#print(inp)

lam, Q = eigen(inp)

exit()

print(lam)
print(Q)

print(Q.mm(torch.diag(lam)).mm(Q.t()))
Example No. 13
def generalized_leapfrog(q,p,epsilon,Ham,delta=0.1,debug_dict=None):
    # input output point object
    # can take anything but should output tensor
    #print("first q abstract {}".format(q.flattened_tensor))
    #print("first p abstract {}".format(p.flattened_tensor))
    stat = gleapfrog_stat()
    dV,H_,dH = Ham.V.getdH_tensor(q)
    #print("dH abstract {}".format(dH))

    lam, Q = eigen(H_)

    #print("second q abstract {}".format(q.flattened_tensor))
    #print("second p abstract {}".format(p.flattened_tensor))
    # dphidq outputs and inputs takes flattened gradient in flattened form
    p.flattened_tensor -= epsilon * 0.5 * Ham.T.dphidq(lam,dH,Q,dV)

    #print("third q abstract {}".format(q.flattened_tensor))
    #print("third p abstract {}".format(p.flattened_tensor))
    p.load_flatten()
    rho = p.flattened_tensor.clone()
    pprime = p.flattened_tensor.clone()
    deltap = delta + 0.5
    count = 0

    while (deltap > delta) and (count < 10):
        # dtaudq returns gradient in flattened form
        pprime.copy_(rho - epsilon * 0.5 * Ham.T.dtaudq(p.flattened_tensor,dH,Q,lam))
        deltap = torch.max(torch.abs(p.flattened_tensor-pprime))
        p.flattened_tensor.copy_(pprime)
        p.load_flatten()
        count = count + 1
    if deltap>delta:
        stat.divergent = True
        stat.first_divergent = True
        #print("pprime {}".format(pprime))
        #print("deltap {}".format(deltap))
        #print("first fi div")
        #print(count)
        return (q, p, stat)
    else:
        stat.first_divergent = False



    #print(first_fi_divergent)
    #print(p.flattened_tensor)
    sigma = q.point_clone()
    qprime = q.flattened_tensor.clone()
    deltaq = delta + 0.5

    _,H_ = Ham.V.getH_tensor(sigma)

    olam,oQ = eigen(H_)

    count = 0


    while (deltaq > delta) and (count < 10):
        _,H_ = Ham.V.getH_tensor(q)
        lam,Q = eigen(H_)
        qprime = sigma.flattened_tensor + 0.5 * epsilon * Ham.T.dtaudp(p.flattened_tensor,olam,oQ) + \
                 0.5 * epsilon* Ham.T.dtaudp(p.flattened_tensor,lam,Q)
        deltaq = torch.max(torch.abs(q.flattened_tensor-qprime))
        q.flattened_tensor.copy_(qprime)
        q.load_flatten()
        count = count + 1
    if deltaq>delta:
        stat.second_divergent = True
        stat.divergent =True
        #print("second fi div")
        return(q,p,stat)
    else:
        stat.second_divergent = False
    #print("H is {}".format(Ham.evaluate(q,p)))


    dV,H_,dH = Ham.V.getdH_tensor(q)
    lam,Q = eigen(H_)
    #print(0.5 * epsilon * Ham.T.dtaudq(p.flattened_tensor,dH,Q,lam))

    p.flattened_tensor -= 0.5 * epsilon * Ham.T.dtaudq(p.flattened_tensor,dH,Q,lam)
    #print("H is {}".format(Ham.evaluate(q, p)))
    p.load_flatten()

    p.flattened_tensor -= 0.5 * epsilon * Ham.T.dphidq(lam,dH,Q,dV)

    p.load_flatten()
    #print("yes")
    if debug_dict is not None:
        debug_dict.update({"abstract": p.flattened_tensor.clone()})
    return(q,p,stat)
Example No. 14
def generate(q):
    lam, Q = eigen(getH(q, V).data)
    temp = torch.mm(Q, torch.diag(torch.sqrt(softabs_map(lam, alpha))))
    out = torch.mv(temp, torch.randn(len(lam)))
    return (out)
def p_sharp(q, p):
    lam, Q = eigen(getH(q, V).data)
    p_s = dtaudp(p.data, alp, lam, Q)
    return (p_s)