Esempio n. 1
0
    def net_builder(self, train_dict):
        """Build the pairwise / many-body networks for every integrator
        evaluation step and wrap them into one mbpw force object.

        Returns the mbpw object and a flat list of all networks, kept so
        they can be checkpointed together.
        """
        n_eval = velocity_verlet3.neval
        out_dim = 2
        pwnet_list = []
        mbnet_list = []

        # Build one pw/mb pair per step, in lock-step, so the construction
        # (and hence random-initialization) order matches the original.
        for step in range(n_eval):
            pw = pw_net(step + 2, out_dim, train_dict["pwnet_nnodes"])
            pwnet_list.append(mydevice.load(pw))
            mb = mb_net(12 * step + 24, out_dim, train_dict["mbnet_nnodes"])
            mbnet_list.append(mydevice.load(mb))

        # One pairwise-for-many-body net per two evaluation steps.
        pw4mb_list = [
            mydevice.load(pw_net(2, out_dim, train_dict["pw4mb_nnodes"]))
            for _ in range(n_eval // 2)
        ]

        pw_obj = pw_ff(pwnet_list)
        mb_obj = mb_ff(mbnet_list, pw4mb_list,
                       train_dict["ngrids"], train_dict["b"])
        mbpw_obj = mbpw(mb_obj, pw_obj)

        # concatenate all networks for checkpoint
        net_list = pwnet_list + mbnet_list + pw4mb_list

        return mbpw_obj, net_list
Esempio n. 2
0
    def __init__(self, train_dict, loss_dict):
        """Build networks, force objects, optimizers, scheduler, loss and
        checkpoint for a training run.

        train_dict keys read here: "nn_mode", "nnodes", "force_clip",
        "ngrids", "b", "lr", "sch_step", "sch_decay", "alpha_lr".
        loss_dict keys read here: "eweight", "polynomial_degree".
        """

        self.train_dict = train_dict

        pwnet_input = 5  # for pw force function
        mbnet_input = 25  # for mb force function

        mode = train_dict["nn_mode"]

        # 'hf' mode networks emit a single output; every other mode uses two.
        if mode == 'hf':
            output_dim = 1
        else:
            output_dim = 2

        # Two pairwise and two many-body networks, each moved to the compute
        # device.  NOTE(review): construction order fixes the random-init
        # sequence — keep it stable for reproducibility.
        pwnet1 = mydevice.load(
            pw_net(pwnet_input, output_dim, train_dict["nnodes"]))
        pwnet2 = mydevice.load(
            pw_net(pwnet_input, output_dim, train_dict["nnodes"]))
        mbnet1 = mydevice.load(
            mb_net(mbnet_input, output_dim, train_dict["nnodes"]))
        mbnet2 = mydevice.load(
            mb_net(mbnet_input, output_dim, train_dict["nnodes"]))
        self.net_list = [pwnet1, pwnet2, mbnet1, mbnet2]

        print('pwnet1', pwnet1)
        print('pwnet2', pwnet2)
        print('mbnet1', mbnet1)
        print('mbnet2', mbnet2)

        # Wrap the networks in force objects matching the chosen mode.
        if mode == 'hf':
            pwforce = pw_hf(pwnet1, pwnet2, train_dict["force_clip"])
            mbforce = mb_hf(mbnet1, mbnet2, train_dict["ngrids"],
                            train_dict["b"], train_dict["force_clip"])
        else:
            pwforce = pw_ff(pwnet1, pwnet2, train_dict["force_clip"])
            mbforce = mb_ff(mbnet1, mbnet2, train_dict["ngrids"],
                            train_dict["b"], train_dict["force_clip"])

        self.mbpwff = mbpw(mbforce, pwforce)

        # One SGD optimizer over all four networks' parameters.
        param = itertools.chain(pwnet1.parameters(), pwnet2.parameters(),
                                mbnet1.parameters(), mbnet2.parameters())
        self.opt = optim.SGD(param, lr=train_dict["lr"])
        #sch = optim.lr_scheduler.StepLR(self.opt,train_dict["sch_step"],train_dict["sch_decay"])
        self.sch = DecayCosineAnnealingWarmRestarts(self.opt,
                                                    train_dict["sch_step"],
                                                    train_dict["sch_decay"])
        # Separate optimizer for the combined force-field's own parameters
        # (presumably the alpha/mixing weights — confirm against mbpw).
        self.opta = optim.SGD(self.mbpwff.parameters(), train_dict["alpha_lr"])

        self.mlvv = velocity_verlet(self.mbpwff)

        lj = lennard_jones2d()
        self.loss_obj = loss(lj, loss_dict["eweight"],
                             loss_dict["polynomial_degree"])

        # Checkpoint bundles everything needed to resume training.
        self.ckpt = checkpoint(self.net_list, self.mbpwff, self.opt, self.opta,
                               self.sch)
Esempio n. 3
0
def pack_data(qpl_input, qpl_label):
    """Split the stacked (q, p, l) channels out of the input/label tensors.

    Axis 1 channels are treated as q / p / l respectively (per the variable
    names).  Each slice is cloned, detached, marked to require grad, and
    moved to the compute device.
    """

    def _prepare(channel):
        # clone -> detach -> enable grad, then transfer to the device
        return mydevice.load(channel.clone().detach().requires_grad_())

    q_init = _prepare(qpl_input[:, 0, :, :])
    p_init = _prepare(qpl_input[:, 1, :, :])
    l_init = _prepare(qpl_input[:, 2, :, :])
    q_label = _prepare(qpl_label[:, 0, :, :])
    p_label = _prepare(qpl_label[:, 1, :, :])

    return q_init, p_init, q_label, p_label, l_init
Esempio n. 4
0
    def __init__(self, potential_function, eweight, poly_deg, eth=1e9):
        """Set up the loss bookkeeping: one history list per tracked metric,
        plus the energy threshold tensor on the compute device.

        The metric-name prefixes ('*', '-') are preserved verbatim from the
        original table.
        """

        self.potential_function = potential_function

        # HK20220426 — per-metric history lists.
        metric_names = ("total", "*qrmse", "-qmse", "-qmae", "*prmse",
                        "-pmse", "-pmae", "*emae", "-emse", "*mmae",
                        "qshape", "pshape", "eshape", "mshape",
                        "eweight", "poly")
        self.loss_dict = {name: [] for name in metric_names}

        # Energy threshold as a tensor, moved onto the compute device.
        self.ethrsh = mydevice.load(torch.tensor(eth))

        # eweight is currently unused here (parameter kept for interface
        # compatibility with callers).
        self.poly_deg = poly_deg
Esempio n. 5
0
def paired_distance_reduced(q, npar):
    """Compute pairwise displacements and distances between all particles.

    Args:
        q: position tensor; the reshapes below imply shape
           [nsamples, nparticle, DIM].
        npar: number of particles per sample.

    Returns:
        dq_reshape: displacements, [nsamples, nparticle, nparticle - 1, DIM]
        dd: Euclidean distances, [nsamples, nparticle, nparticle - 1]
    """
    # Unit box lengths for the periodic-boundary displacement.
    # torch.ones replaces the original zeros(...).fill_(1) two-step idiom.
    l_list = mydevice.load(torch.ones(q.shape))

    dq = delta_pbc(q, l_list)  # shape is [nsamples, nparticle, nparticle, DIM]

    # Drop the self-pair (i == i) entries and flatten the remainder.
    dq_reduced_index = get_paired_distance_indices.get_indices(dq.shape)
    dq_flatten = get_paired_distance_indices.reduce(dq, dq_reduced_index)
    # dq_flatten.shape is [nsamples x nparticle x (nparticle - 1) x DIM]

    dq_reshape = dq_flatten.view(q.shape[0], npar, npar - 1, q.shape[2])
    # dq_reshape.shape is [nsamples, nparticle, (nparticle - 1), DIM]

    # Euclidean norm over the last (DIM) axis.
    dd = torch.sqrt(torch.sum(dq_reshape * dq_reshape, dim=-1))
    # dd.shape is [nsamples, nparticle, (nparticle - 1)]
    return dq_reshape, dd
Esempio n. 6
0
    def prepare_input(self, q_list, p_list, l_list, tau, ngrids,
                      b):  # make dqdp for n particles
        """Assemble the many-body network input from grid fields plus the
        half time-step, one row per particle.

        Returns a tensor of shape [nsamples * nparticle,
        2 * ngrids * DIM + 1].
        """
        nsamples, nparticle, DIM = q_list.shape

        u = self.make_grids_center(q_list, l_list, b)  # position at grids
        # u.shape is [nsample, nparticle*ngrids, DIM=2]
        u_fields = self.gen_u_fields(q_list, l_list, u, ngrids)
        # u_fields.shape is [nsamples, npartice, grids*DIM]

        v_fields = self.gen_v_fields(q_list, p_list, u, l_list, ngrids)

        # Half time-step broadcast to every particle; torch.full replaces
        # the original zeros(...).fill_(tau * 0.5) two-step idiom.
        tau_tensor = torch.full([nsamples, nparticle, 1], tau * 0.5)
        tau_tensor = mydevice.load(tau_tensor)

        x = torch.cat((u_fields, v_fields, tau_tensor), dim=-1)
        # x.shape = shape is [ nsamples, nparticle,  ngrids * DIM + ngrids * DIM + 1]

        x = x.view(nsamples * nparticle, 2 * ngrids * DIM + 1)
        return x
Esempio n. 7
0
    def prepare_input(self, q_list, p_list, l_list, tau):
        """Build the pairwise network input: per-pair position and momentum
        differences plus the half time-step.

        Returns a tensor of shape [nsamples * nparticle * nparticle, 5]
        (DIM + DIM + 1 columns).
        """
        nsamples, nparticle, DIM = q_list.shape

        dq = delta_pbc(q_list, l_list)
        # shape is [nsamples, nparticle, nparticle, DIM]
        dq = torch.reshape(dq, (nsamples * nparticle * nparticle, DIM))
        # shape is [nsamples* nparticle* nparticle, DIM]

        dp = delta_state(p_list)
        # dq.shape = dp.shape = [nsamples, nparticle, nparticle, 2]
        dp = torch.reshape(dp, (nsamples * nparticle * nparticle, DIM))
        # shape is [nsamples* nparticle* nparticle, DIM]

        # Half time-step per pair; torch.full replaces the original
        # zeros(...) + 0.5*tau, which allocated a throwaway zero tensor.
        tau_tensor = torch.full([nsamples * nparticle * nparticle, 1],
                                0.5 * tau)
        tau_tensor = mydevice.load(tau_tensor)

        x = torch.cat((dq, dp, tau_tensor), dim=-1)
        # x.shape is [ nsamples*nparticle*nparticle, 5]

        return x
Esempio n. 8
0
    print_dict('data',data)
    print_dict('main',maindict)

    data_set = my_data(data["train_file"],data["valid_file"],data["test_file"],data["n_chain"],
                       data["train_pts"],data["vald_pts"],data["test_pts"])
    loader = data_loader(data_set,data["batch_size"])

    train = trainer(traindict,lossdict)

    train.load_models()

    for e in range(maindict["start_epoch"], maindict["end_epoch"]):

        for qpl_input,qpl_label in loader.train_loader:

            mydevice.load(qpl_input)
    
            q_init,p_init,q_label,p_label,l_init = pack_data(qpl_input,qpl_label)

            train.one_step(q_init,p_init,q_label,p_label,l_init)

        if e%maindict["verb"]==0: 
            train.verbose(e+1,'train')
            system_logs.record_memory_usage(e+1)
            system_logs.record_time_usage(e+1)

        if e%maindict["ckpt_interval"]==0: 
            filename = './{}/mbpw{:06d}.pth'.format(maindict["save_dir"],e+1)
            train.checkpoint(filename)

        if e%maindict["val_interval"]==0: 
Esempio n. 9
0
 def hex_grids_list(self, b):
     """Return the six hexagon vertex offsets scaled by b, as a [6, 2]
     tensor on the compute device."""
     half_b = b * 0.5
     vertices = [[-half_b, -b], [-half_b, b],
                 [-b, 0.], [b, 0.],
                 [half_b, -b], [half_b, b]]
     grids_ncenter = torch.tensor(vertices)  # shape [6, 2]
     return mydevice.load(grids_ncenter)
Esempio n. 10
0
        nnet(5, 2),
        nnet(6, 2),
        nnet(7, 2)
    ]
    # Many-body nets with input widths 24, 36, ..., 84 (12-wide steps).
    mbnet_list = [nnet(24 + 12 * k, 2) for k in range(6)]
    # Three identical 2-input pairwise-for-many-body nets.
    pw4mb_list = [nnet(2, 2) for _ in range(3)]

    # Move every network onto the compute device, assigning the result back
    # into the lists.  The original loop only rebound its loop variables,
    # discarding mydevice.load's return value — correct only if load happens
    # to move the object in place; assigning back is safe either way.
    for idx, (pw_item, mb_item) in enumerate(zip(pwnet_list, mbnet_list)):
        pwnet_list[idx] = mydevice.load(pw_item)
        mbnet_list[idx] = mydevice.load(mb_item)
    for idx, pw4mb_item in enumerate(pw4mb_list):
        pw4mb_list[idx] = mydevice.load(pw4mb_item)

    # Many-body and pairwise force objects built from the network lists.
    mb = mb_ff(mbnet_list, pw4mb_list, ngrids, b, force_clip, nsamples,
               nparticles)
    pw = pw_ff(pwnet_list, force_clip, nsamples, nparticles)

    # Combined force field and the velocity-Verlet integrator driven by it.
    mbpw_obj = mbpw(mb, pw)
    vv3 = velocity_verlet3(mbpw_obj)

    # SGD over all parameters; the tau parameters get a 10x smaller rate.
    lr = 1e-4
    opt = optim.SGD(mbpw_obj.parameters(), lr)
    opt2 = optim.SGD(mbpw_obj.tau_parameters(), lr * 1e-1)
    # Multiply the main learning rate by 0.9 every 1000 scheduler steps.
    sch = torch.optim.lr_scheduler.StepLR(opt, step_size=1000, gamma=0.9)