def run_a_train_epoch(args, epoch, model, data_loader, loss_criterion, optimizer):
    """Run one training epoch for a masked multi-task molecular property model.

    Parameters
    ----------
    args : dict
        Configuration; uses 'atom_data_field', 'device', 'num_epochs',
        'metric_name'.
    epoch : int
        Zero-based index of the current epoch (printed one-based).
    model : nn.Module
        Model taking (batched_graph, node_features).
    data_loader : iterable
        Yields (smiles, batched_graph, labels, masks) tuples.
    loss_criterion : callable
        Element-wise loss (assumes reduction='none' — TODO confirm at caller).
    optimizer : torch.optim.Optimizer
        Optimizer stepping the model parameters.
    """
    model.train()
    meter = Meter()
    device = args['device']
    n_batches = len(data_loader)
    for batch_id, (smiles, bg, labels, masks) in enumerate(data_loader):
        # Node features are stored on the graph; pop them for the model call.
        node_feats = bg.ndata.pop(args['atom_data_field'])
        node_feats = node_feats.to(device)
        labels = labels.to(device)
        masks = masks.to(device)
        logits = model(bg, node_feats)
        # Mask non-existing labels so they contribute zero loss.
        loss = (loss_criterion(logits, labels) * (masks != 0).float()).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print('epoch {:d}/{:d}, batch {:d}/{:d}, loss {:.4f}'.format(
            epoch + 1, args['num_epochs'], batch_id + 1, n_batches,
            loss.item()))
        meter.update(logits, labels, masks)
    train_score = np.mean(meter.compute_metric(args['metric_name']))
    print('epoch {:d}/{:d}, training {} {:.4f}'.format(epoch + 1, args['num_epochs'], args['metric_name'], train_score))
def run_a_train_epoch(args, epoch, model, data_loader, loss_criterion, optimizer):
    """Run one training epoch for a regression model on normalized labels.

    Labels are standardized with the training-set mean/std before computing
    the loss; the Meter is constructed with the same statistics so metrics
    are reported on the original label scale.

    Parameters
    ----------
    args : dict
        Configuration; uses 'train_mean', 'train_std', 'device',
        'num_epochs', 'metrics'.
    epoch : int
        Zero-based index of the current epoch (printed one-based).
    model : nn.Module
        Model taking a batched graph and returning predictions.
    data_loader : iterable
        Yields (indices, ligand_mols, protein_mols, batched_graph, labels).
    loss_criterion : callable
        Loss over (prediction, normalized labels).
    optimizer : torch.optim.Optimizer
        Optimizer stepping the model parameters.
    """
    model.train()
    train_meter = Meter(args['train_mean'], args['train_std'])
    epoch_loss = 0
    for batch_id, batch_data in enumerate(data_loader):
        indices, ligand_mols, protein_mols, bg, labels = batch_data
        labels, bg = labels.to(args['device']), bg.to(args['device'])
        prediction = model(bg)
        # Train against standardized labels; Meter un-standardizes for metrics.
        loss = loss_criterion(prediction,
                              (labels - args['train_mean']) / args['train_std'])
        # FIX: .data is deprecated and bypasses autograd bookkeeping;
        # .item() is the supported way to extract a Python scalar.
        # Weight by batch size so the epoch average is per-sample.
        epoch_loss += loss.item() * len(indices)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_meter.update(prediction, labels)
    avg_loss = epoch_loss / len(data_loader.dataset)
    total_scores = {
        metric: train_meter.compute_metric(metric) for metric in args['metrics']
    }
    msg = 'epoch {:d}/{:d}, training | loss {:.4f}'.format(
        epoch + 1, args['num_epochs'], avg_loss)
    msg = update_msg_from_scores(msg, total_scores)
    print(msg)
def run_an_eval_epoch(args, model, data_loader):
    """Evaluate the model over a full data loader and return the mean metric.

    Parameters
    ----------
    args : dict
        Configuration; uses 'device' and 'metric_name'.
    model : nn.Module
        Model evaluated via the `regress` helper.
    data_loader : iterable
        Yields (smiles, batched_graph, labels, masks) tuples.

    Returns
    -------
    float
        Mean of the per-task metric values.
    """
    model.eval()
    meter = Meter()
    # No gradients needed during evaluation.
    with torch.no_grad():
        for batch_id, (smiles, bg, labels, masks) in enumerate(data_loader):
            labels = labels.to(args['device'])
            prediction = regress(args, model, bg)
            meter.update(prediction, labels, masks)
    total_score = np.mean(meter.compute_metric(args['metric_name']))
    return total_score
def run_an_eval_epoch(args, model, data_loader):
    """Evaluate a node-feature model over a full loader; return mean metric.

    Parameters
    ----------
    args : dict
        Configuration; uses 'atom_data_field', 'device', 'metric_name'.
    model : nn.Module
        Model taking (batched_graph, node_features).
    data_loader : iterable
        Yields (smiles, batched_graph, labels, masks) tuples.

    Returns
    -------
    float
        Mean of the per-task metric values.
    """
    model.eval()
    meter = Meter()
    device = args['device']
    # No gradients needed during evaluation.
    with torch.no_grad():
        for batch_id, (smiles, bg, labels, masks) in enumerate(data_loader):
            node_feats = bg.ndata.pop(args['atom_data_field'])
            node_feats = node_feats.to(device)
            labels = labels.to(device)
            logits = model(bg, node_feats)
            meter.update(logits, labels, masks)
    return np.mean(meter.compute_metric(args['metric_name']))
def run_an_eval_epoch(args, model, data_loader):
    """Evaluate a regression model; return a dict of metric name -> score.

    The Meter is constructed with the training mean/std so metrics are
    computed on the original (un-standardized) label scale.

    Parameters
    ----------
    args : dict
        Configuration; uses 'train_mean', 'train_std', 'device', 'metrics'.
    model : nn.Module
        Model taking a batched graph and returning predictions.
    data_loader : iterable
        Yields (indices, ligand_mols, protein_mols, batched_graph, labels).

    Returns
    -------
    dict
        Maps each metric name in args['metrics'] to its computed value.
    """
    model.eval()
    meter = Meter(args['train_mean'], args['train_std'])
    device = args['device']
    # No gradients needed during evaluation.
    with torch.no_grad():
        for batch_id, batch_data in enumerate(data_loader):
            indices, ligand_mols, protein_mols, bg, labels = batch_data
            labels = labels.to(device)
            bg = bg.to(device)
            prediction = model(bg)
            meter.update(prediction, labels)
    return {metric: meter.compute_metric(metric) for metric in args['metrics']}
def run_a_train_epoch(args, epoch, model, data_loader, loss_criterion, optimizer):
    """Run one training epoch using the `regress` helper for prediction.

    Parameters
    ----------
    args : dict
        Configuration; uses 'device', 'num_epochs', 'metric_name'.
    epoch : int
        Zero-based index of the current epoch (printed one-based).
    model : nn.Module
        Model evaluated via the `regress` helper.
    data_loader : iterable
        Yields (smiles, batched_graph, labels, masks) tuples.
    loss_criterion : callable
        Element-wise loss (assumes reduction='none' — TODO confirm at caller).
    optimizer : torch.optim.Optimizer
        Optimizer stepping the model parameters.
    """
    model.train()
    meter = Meter()
    device = args['device']
    for batch_id, (smiles, bg, labels, masks) in enumerate(data_loader):
        labels = labels.to(device)
        masks = masks.to(device)
        prediction = regress(args, model, bg)
        # Mask non-existing labels so they contribute zero loss.
        loss = (loss_criterion(prediction, labels) * (masks != 0).float()).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        meter.update(prediction, labels, masks)
    total_score = np.mean(meter.compute_metric(args['metric_name']))
    print('epoch {:d}/{:d}, training {} {:.4f}'.format(epoch + 1, args['num_epochs'], args['metric_name'], total_score))