Example #1
import time

import torch


def trainEnergy(model, optimizer, criterion, dataloaderTrain, Nepochs, L,
                radious, maxNumNeighs, device):
    print("Entering the training Stage")

    for epoch in range(1, Nepochs + 1):
        # monitor training loss
        train_loss = 0.0
        model.train()

        # monitoring time elapsed
        start = time.time()
        ###################
        # train the model #
        ###################
        for pos, energy in dataloaderTrain:

            # computing the interaction list (via numba)
            neighbor_list = computInterListOpt(pos.numpy(), L, radious,
                                               maxNumNeighs)
            # convert the neighbor list to a pytorch tensor and move it to the device
            neighbor_list = torch.tensor(neighbor_list).to(device)
            # send the positions and energies to the device (either cpu or gpu)
            pos, energy = pos.to(device), energy.to(device)
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # forward pass: compute predicted outputs by passing inputs to the model
            energyNN = model(pos, neighbor_list)
            # calculate the loss
            loss = criterion(energyNN, energy)
            # backward pass: compute gradient of the loss with respect to model parameters
            loss.backward()
            # perform a single optimization step (parameter update)
            optimizer.step()
            # update running training loss
            train_loss += loss.item()

        # monitoring the elapsed time
        end = time.time()
        # print avg training statistics
        train_loss = train_loss / len(dataloaderTrain)
        print(
            'Epoch: {} \tTraining Loss: {:.10f} \t Time per epoch: {:.3f} [s]'.
            format(epoch, train_loss, end - start),
            flush=True)
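
A minimal usage sketch for trainEnergy; the model class, optimizer, loss, and hyperparameter values below are illustrative assumptions rather than part of the original code:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = EnergyModel().to(device)  # hypothetical nn.Module mapping (pos, neighbor_list) -> energy
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = torch.nn.MSELoss()

trainEnergy(model, optimizer, criterion, dataloaderTrain,
            Nepochs=100, L=Lcell * Ncells, radious=1.5,
            maxNumNeighs=8, device=device)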
Example #2
# creating the data sets (we don't consider testing so far)
datasetTrain = torch.utils.data.TensorDataset(pointsArrayTorch,
                                              potentialArrayTorch)

# creating the data loader
dataloaderTrain = torch.utils.data.DataLoader(datasetTrain,
                                              batch_size=batchSize,
                                              shuffle=True,
                                              num_workers=4)

# computing an estimate of ave and std
pointsArrayTorchSmall = pointsArrayTorch[:16, :]

pointsnumpy = pointsArrayTorchSmall.numpy()

neighbor_list = computInterListOpt(pointsnumpy, Lcell * Ncells, radious,
                                   maxNumNeighs)

neighbor_list = torch.tensor(neighbor_list)

(dist, distInv) = genCoordinates(pointsArrayTorchSmall, neighbor_list,
                                 Lcell * Ncells)

# we compute the mean and std (only the positive values)
# given that the zero values are just padding.
ave = torch.stack(
    [torch.mean(dist[dist > 0]),
     torch.mean(distInv[distInv > 0])])

std = torch.stack([torch.std(dist[dist > 0]), torch.std(distInv[distInv > 0])])

# ## compute the mean and std for the samples
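
These estimates are presumably used to whiten the generalized coordinates before they enter the network; a minimal sketch of that assumption, which rescales only the nonzero entries so the zero padding stays untouched, could look like:

distNorm = torch.where(dist > 0, (dist - ave[0]) / std[0], dist)
distInvNorm = torch.where(distInv > 0, (distInv - ave[1]) / std[1], distInv)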
Example #3
import torch

# parameters for the periodic Yukawa test system
Ncells = 10
Np = 2
mu = 10
Nsamples = 2
minDelta = 0.1
Lcell = 1.0
L = Ncells * Lcell  # total box length
Npoints = Ncells * Np

radious = 1.5
maxNumNeighs = 8

points, pot, forces = genDataYukawaPer(Ncells, Np, mu, Nsamples, minDelta,
                                       Lcell)

neighList = computInterListOpt(points, L, radious, maxNumNeighs)

positions = torch.tensor(points, dtype=torch.float32, requires_grad=True)

neighborList = torch.tensor(neighList, dtype=torch.int32)
# shape: (Nsamples, Npoints, maxNumNeighs)

# we build the Distance tensor

Dist = torch.zeros((Nsamples, Npoints, maxNumNeighs), dtype=torch.float32)
DistInv = torch.zeros((Nsamples, Npoints, maxNumNeighs), dtype=torch.float32)

mean = torch.tensor([0.0, 0.0], dtype=torch.float32)
std = torch.tensor([1.0, 1.0], dtype=torch.float32)
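
A plausible continuation, assuming genCoordinates is the same helper called in Example #2: overwrite the pre-allocated tensors with its output and rescale the nonzero entries with the (here trivial) mean/std pair:

# assumption: same (dist, distInv) helper as in Example #2
Dist, DistInv = genCoordinates(positions, neighborList, L)
Dist = torch.where(Dist > 0, (Dist - mean[0]) / std[0], Dist)
DistInv = torch.where(DistInv > 0, (DistInv - mean[1]) / std[1], DistInv)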