import torch

# AverageMeter and Metric are assumed to be provided elsewhere in the project
# (their import is not shown in this snippet); a reference sketch of the
# interface this file relies on is given after train() below.


def train(model, loader, criterion, optimizer, epoch, device, opt):
    """Train the model for one epoch and return (average loss, recall)."""

    model.train()

    train_loss = 0.0
    losses = AverageMeter()
    metric = Metric(opt.num_classes)
    for i, (imgs, spatial_locations, word_vectors, targets_predicates,
            targets_confidences) in enumerate(loader):
        # move inputs and targets to the target device
        imgs = imgs.to(device)
        spatial_locations = spatial_locations.to(device)
        word_vectors = word_vectors.to(device)
        targets_confidences = targets_confidences.to(device)
        targets_predicates = targets_predicates.to(device)

        # forward pass
        confidences, predicates = model(imgs, spatial_locations, word_vectors)

        # compute loss: confidence branch + predicate branch
        loss1 = criterion(confidences, targets_confidences)
        loss2 = criterion(predicates, targets_predicates)
        tot_loss = loss1 + loss2
        train_loss += tot_loss.item()

        losses.update(tot_loss.item(), imgs.size(0))
        # apply sigmoid before updating the recall metric; the criterion is
        # assumed to operate on raw logits (e.g. a BCE-with-logits style loss)
        predicates = torch.sigmoid(predicates)
        metric.update(predicates, targets_predicates)

        optimizer.zero_grad()
        tot_loss.backward()
        optimizer.step()

        # show information
        if (i + 1) % opt.log_interval == 0:
            avg_loss = train_loss / opt.log_interval
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, losses.count, len(loader.dataset),
                100. * (i + 1) / len(loader), avg_loss))
            train_loss = 0.0

    # show information
    recall = metric.compute_metrics()
    print('Train set ({:d} samples): Average loss: {:.4f}\tRecall: {:.4f}'.
          format(losses.count, losses.avg, recall))

    return losses.avg, recall
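

# Reference sketch (assumption): the loops in this file only rely on the
# interface below. These are NOT the project's real AverageMeter/Metric
# implementations; in particular, this Metric computes a plain micro-averaged
# multi-label recall at a 0.5 threshold, which may differ from the repo's
# recall definition (e.g. recall@K for predicate classification).
class AverageMeter:
    """Tracks a running average of a scalar value (e.g. the loss)."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        # `val` is the per-sample average over a batch of size `n`
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


class Metric:
    """Accumulates predictions and targets to compute multi-label recall."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.true_positives = 0.0
        self.positives = 0.0

    def update(self, scores, targets):
        # scores: (batch, num_classes) probabilities; targets: multi-hot labels
        preds = (scores >= 0.5).float()
        self.true_positives += (preds * targets.float()).sum().item()
        self.positives += targets.float().sum().item()

    def compute_metrics(self):
        # recall = TP / (TP + FN), guarding against an empty target set
        return self.true_positives / max(self.positives, 1.0)
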
def validate(model, loader, criterion, epoch, device, opt):
    """Evaluate the model for one epoch and return (average loss, recall)."""

    model.eval()

    losses = AverageMeter()
    metric = Metric(opt.num_classes)
    with torch.no_grad():
        for i, (imgs, spatial_locations, word_vectors, targets_predicates, targets_confidences) in enumerate(loader):
            # move inputs and targets to the target device
            imgs = imgs.to(device)
            spatial_locations = spatial_locations.to(device)
            word_vectors = word_vectors.to(device)
            targets_confidences = targets_confidences.to(device)
            targets_predicates = targets_predicates.to(device)

            # forward pass
            confidences, predicates = model(imgs, spatial_locations, word_vectors)

            # compute loss (only the predicate loss is tracked during validation)
            loss = criterion(predicates, targets_predicates)

            # apply sigmoid before updating the recall metric, for consistency
            # with the training loop above
            predicates = torch.sigmoid(predicates)
            metric.update(predicates, targets_predicates)
            losses.update(loss.item(), imgs.size(0))

    # show information
    recall = metric.compute_metrics()
    print('Validation set ({:d} samples): Average loss: {:.4f}\tRecall: {:.4f}'.format(losses.count, losses.avg, recall))
    return losses.avg, recall
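

# Usage sketch (assumption): a minimal driver showing how train() and
# validate() are typically wired together. The `opt.epochs` field, the
# checkpoint filename, and the best-recall selection rule are illustrative
# placeholders, not this repo's actual entry point.
def run_training(model, train_loader, val_loader, criterion, optimizer, device, opt):
    best_recall = 0.0
    for epoch in range(1, opt.epochs + 1):
        train(model, train_loader, criterion, optimizer, epoch, device, opt)
        _, val_recall = validate(model, val_loader, criterion, epoch, device, opt)

        # keep the checkpoint with the best validation recall
        if val_recall > best_recall:
            best_recall = val_recall
            torch.save({'epoch': epoch,
                        'state_dict': model.state_dict(),
                        'recall': val_recall}, 'best_checkpoint.pth')
    return best_recall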