Example #1
 def __init__(self, mb, pw):
     self.mb = mb
     self.pw = pw
     # learnable scalar coefficients alpha1/alpha2, initialized to zero
     self.alpha1 = torch.zeros([1],
                               requires_grad=True,
                               device=mydevice.get())
     self.alpha2 = torch.zeros([1],
                               requires_grad=True,
                               device=mydevice.get())
Example #2
 def __init__(self, mb, pw):
     self.mb = mb
     self.pw = pw
     self.alpha1 = torch.tensor([-1.35],
                                requires_grad=True,
                                device=mydevice.get())  # start with 12% mb
     self.alpha2 = torch.tensor([-1.35],
                                requires_grad=True,
                                device=mydevice.get())  # start with 12% mb
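Taken together with the threshold annotations in Example #3, these initializations suggest each alpha feeds a sigmoid that weights a blend of the mb and pw network outputs. Below is a minimal sketch under that assumption; blend and its argument names are illustrative, not from the source repo:

    import torch

    def blend(mb_out, pw_out, alpha):
        w = torch.sigmoid(alpha)  # assumed weight on the mb branch
        return w * mb_out + (1.0 - w) * pw_out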
Example #3
 def clamp_a(self, alp):
     # keep alpha inside [lower_thrsh, upper_thrsh]; clamping .data in
     # place keeps alp a leaf tensor and stays out of the autograd graph
     lower_thrsh = -5.0  # 0.9991 use pw net
     upper_thrsh = 2.2
     # 0.9002 use mb net -- found that mb net can't be optimized well
     alp.data.clamp_(lower_thrsh, upper_thrsh)
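The threshold comments are consistent with a plain sigmoid gate: sigmoid(2.2) is 0.9002 to four decimal places, exactly the "use mb net" figure, while the 0.9991 pw weight corresponds to alpha = -7 rather than -5 (compare thrsh = 7.0 in Example #6), so the lower-bound comment may be stale. A standalone check of those values:

    import torch

    print(torch.sigmoid(torch.tensor(2.2)))       # tensor(0.9002) -> ~90% mb
    print(1 - torch.sigmoid(torch.tensor(-7.0)))  # tensor(0.9991) -> ~99.9% pw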
Example #4
    def ufields(self, q, p, l_list, u_list, pwnet,
                ngrids):  # u_list = grid center position
        # u_list.shape is [nsamples, nparticles*ngrids, DIM=2]
        # l_list.shape is [nsamples, nparticles, DIM]
        nsamples, nparticles, DIM = q.shape
        xi = q / l_list  # dimensionless

        l_list4u = l_list.repeat_interleave(ngrids, dim=1)
        u_dimless = u_list / l_list4u  # dimensionless

        l_reduced = torch.ones(
            l_list.shape, requires_grad=False,
            device=mydevice.get())  # shape [nsamples,nparticles,dim]
        l_reduced = torch.unsqueeze(
            l_reduced, dim=2)  # shape is [nsamples, nparticles, 1, DIM]
        l_reduced = l_reduced.repeat_interleave(u_list.shape[1], dim=2)
        # l_reduced.shape is [nsamples, nparticles, nparticles * ngrids, DIM]

        _, d_sq = self.dpair_pbc_sq(xi, u_dimless, l_reduced)
        # d_sq.shape is [nsamples, nparticles, nparticles * ngrids]

        l_list = l_list[:, :, 0]
        l_list = l_list.view(nsamples, nparticles, 1)
        # l_list.shape = [nsamples, nparticles, 1]
        dq_sq = d_sq * l_list * l_list

        dq_sq = dq_sq.view(nsamples * nparticles * nparticles * ngrids,
                           1)  # dq^2
        # dq_sq.shape is [nsamples * nparticles * nparticles * ngrids, 1]

        del_p = delta_state(
            p)  #  shape is [nsamples, nparticles, nparticles, DIM]

        dp_sq = torch.sum(
            del_p * del_p,
            dim=-1)  #  shape is [nsamples, nparticles, nparticles]

        dp_sq = torch.unsqueeze(
            dp_sq,
            dim=3)  # dp_sq.shape is [nsamples, nparticles, nparticles, 1]
        dp_sq = dp_sq.repeat_interleave(ngrids, dim=3)
        # dp_sq.shape is [nsamples, nparticles, nparticles, ngrids]

        dp_sq = dp_sq.view(nsamples * nparticles * nparticles * ngrids,
                           1)  # dp^2
        # dp_sq.shape is [nsamples * nparticles * nparticles * ngrids, 1]

        x = torch.cat((dq_sq, dp_sq), dim=-1)
        # x.shape = [batch, 2] - dq^2, dp^2
        pair_pwnet = pwnet(x, dq_sq)
        # pair_pwnet.shape = [batch, 2] - fx, fy

        pair_pwnet = self.zero_ufields(pair_pwnet, nsamples, nparticles,
                                       ngrids)
        # pair_pwnet.shape is [nsamples, nparticles, nparticles*ngrids, DIM]

        dphi_fields = torch.sum(pair_pwnet,
                                dim=1)  # sum over particle index; self terms zeroed above
        # dphi_fields.shape is [nsamples, nparticles * ngrids, DIM=2]
        return dphi_fields
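delta_state is defined elsewhere in the repo; from the shape comment above, it is assumed to return all pairwise differences along the particle axis. A minimal sketch under that assumption (the real version may differ, e.g. in sign convention):

    import torch

    def delta_state_sketch(p):
        # p.shape is [nsamples, nparticles, DIM];
        # output shape is [nsamples, nparticles, nparticles, DIM]
        return p.unsqueeze(2) - p.unsqueeze(1)  # p[:, j] - p[:, k] via broadcasting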
Example #5
 def make_mask(self, nsamples, nparticles):
     dim = self.net_list[0].output_dim
     # mask to mask out self force when doing net predictions
     self.mask = torch.ones([nsamples, nparticles, nparticles, dim],
                            device=mydevice.get())
     dia = torch.diagonal(self.mask, dim1=1, dim2=2)
     # torch.diagonal returns a view, so fill_ zeroes the self-force
     # diagonal of self.mask in place
     dia.fill_(0.0)
Example #6
 def clamp_alpha(self):
     # keep both learnable coefficients within [-thrsh, thrsh]; clamping
     # .data in place keeps them leaf tensors with requires_grad=True
     thrsh = 7.0
     self.alpha1.data.clamp_(-thrsh, thrsh)
     self.alpha2.data.clamp_(-thrsh, thrsh)
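A quick standalone check of this pattern (hypothetical values, not from the source): clamping through .data leaves the tensor a leaf with requires_grad=True, so an optimizer keeps updating it afterwards.

    import torch

    alpha = torch.tensor([9.0], requires_grad=True)
    alpha.data.clamp_(-7.0, 7.0)  # in-place clamp through .data
    print(alpha)                  # tensor([7.], requires_grad=True)
    print(alpha.is_leaf)          # True: still usable as an optimizer parameter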
Example #7
 def make_mask(self, nsamples, nparticles):
     # slow method: zero self-interaction entries one particle at a time
     dim = self.net_list[0].output_dim
     # mask to mask out self force when doing net predictions
     self.mask = torch.ones([nsamples, nparticles, nparticles, dim],
                            device=mydevice.get())
     for i in range(nparticles):
         self.mask[:, i, i, :] = 0.0
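Examples #5 and #7 build the same mask; the diagonal-view version simply avoids the Python loop. A standalone equivalence check (shapes chosen arbitrarily):

    import torch

    nsamples, nparticles, dim = 2, 4, 3

    # fast method (Example #5): fill the diagonal through a view
    fast = torch.ones([nsamples, nparticles, nparticles, dim])
    torch.diagonal(fast, dim1=1, dim2=2).fill_(0.0)

    # slow method (Example #7): explicit loop over particles
    slow = torch.ones([nsamples, nparticles, nparticles, dim])
    for i in range(nparticles):
        slow[:, i, i, :] = 0.0

    assert torch.equal(fast, slow)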
Example #8
 def __init__(self, net_list, neval=6):
     self.net_list = net_list
     self.tau = torch.ones([neval],
                           requires_grad=True,
                           device=mydevice.get())
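tau here, like the alphas above, is a bare leaf tensor rather than an nn.Parameter, so Module.parameters() will not pick it up; it has to be handed to the optimizer explicitly. A hypothetical sketch:

    import torch

    tau = torch.ones([6], requires_grad=True)
    alpha1 = torch.zeros([1], requires_grad=True)

    # register the bare leaf tensors alongside any module parameters
    opt = torch.optim.Adam([tau, alpha1], lr=1e-3)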