Example #1
0
def test(loader):
    """Evaluate `model` over `loader`; return per-atom (MAE, MSE).

    Uses the module-level `model`, `device`, and `loss_functions`.
    Normalizes both accumulated losses by the total number of masked atoms.
    """
    model.eval()
    mae_sum = 0
    mse_sum = 0
    atom_count = 0
    with torch.no_grad():  # inference only — no autograd bookkeeping
        for batch in loader:
            batch = batch.to(device)
            n_atoms = batch.mask.sum().item()
            prediction = model(batch)

            mse_sum += loss_functions.MSE_loss(prediction, batch.y, batch.mask)
            mae_sum += loss_functions.MAE_loss(prediction, batch.y, batch.mask)
            atom_count += n_atoms
    # Per-atom averages, in (MAE, MSE) order as callers expect.
    return float(mae_sum) / atom_count, float(mse_sum) / atom_count
Example #2
0
def train(epoch):
    """Train `model` for one epoch over `train_loader`.

    Uses the module-level `model`, `device`, `optimizer`, `train_loader`,
    and `loss_functions`. Returns the mean per-batch MSE loss as a float.
    """
    model.train()
    loss_all = 0.0
    total = 0

    # note that the number of atoms exceeds the number of carbons, and therefore there will be many zeros
    for i, data in enumerate(train_loader):
        data = data.to(device)
        optimizer.zero_grad()
        target = torch.FloatTensor(data.y).to(device)
        mask = torch.FloatTensor(data.mask).to(device)
        loss = loss_functions.MSE_loss(model(data), target, mask)
        loss.backward()
        # FIX: accumulate a detached Python float via .item(). The original
        # `loss_all += loss` summed graph-attached tensors, keeping every
        # batch's autograd graph alive for the whole epoch (memory leak).
        loss_all += loss.item()
        optimizer.step()
        total += 1
    return loss_all / total
Example #3
0
def train(epoch):
    """Train `model` for one epoch over `train_loader`.

    Uses the module-level `model`, `device`, `optimizer`, `train_loader`,
    and `loss_functions`. Returns the summed MSE loss normalized by the
    total number of masked atoms, as a float.
    """
    model.train()
    loss_all = 0.0
    total_atoms = 0

    # note that the number of atoms exceeds the number of carbons, and therefore there will be many zeros
    for i, data in enumerate(train_loader):
        data = data.to(device)
        optimizer.zero_grad()
        atoms = data.mask.sum().item()
        pred = model(data)

        loss = loss_functions.MSE_loss(pred, data.y, data.mask)
        loss.backward()
        # FIX: accumulate a detached Python float via .item(). The original
        # `loss_all += loss` summed graph-attached tensors, retaining each
        # batch's computation graph and growing memory over the epoch.
        loss_all += loss.item()
        optimizer.step()
        total_atoms += atoms
    return loss_all / total_atoms