Example #1
def test_BMN(data_loader, model, epoch, writer, opt):
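    """Evaluate BMN for one epoch: accumulate TEM/PEM losses over the
    validation loader, log the per-iteration averages to TensorBoard, and
    save a checkpoint (plus a separate copy of the best model so far)."""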
    model.eval()
    epoch_pem_loss = 0
    epoch_tem_loss = 0
    epoch_loss = 0
    # Evaluation only: disable autograd to avoid building the graph.
    with torch.no_grad():
        for n_iter, (input_data, label_start, label_end, label_confidence) in enumerate(data_loader):
            input_data = input_data.cuda()
            label_start = label_start.cuda()
            label_end = label_end.cuda()
            label_confidence = label_confidence.cuda()

            start_end, confidence_map = model(input_data)
            tem_loss = TEM_loss_function(label_start, label_end, start_end, opt)
            # confidence_mask is assumed to be a precomputed, module-level BM mask
            # (see the setup sketch below); it is not a parameter of this function.
            pem_loss = PEM_loss_function(label_confidence, confidence_map, confidence_mask, opt)
            loss = tem_loss + pem_loss

            epoch_pem_loss += pem_loss.cpu().detach().numpy()
            epoch_tem_loss += tem_loss.cpu().detach().numpy()
            epoch_loss += loss.cpu().detach().numpy()

    writer.add_scalars('data/pem_loss', {'validation': epoch_pem_loss / (n_iter + 1)}, epoch)
    writer.add_scalars('data/tem_loss', {'validation': epoch_tem_loss / (n_iter + 1)}, epoch)
    writer.add_scalars('data/total_loss', {'validation': epoch_loss / (n_iter + 1)}, epoch)

    print("BMN testing loss(epoch %d): tem_loss: %.03f, pem_loss: %.03f, total_loss: %.03f" % (
        epoch, epoch_tem_loss / (n_iter + 1),
        epoch_pem_loss / (n_iter + 1),
        epoch_loss / (n_iter + 1)))

    state = {'epoch': epoch + 1,
             'state_dict': model.state_dict()}
    torch.save(state, opt["checkpoint_path"] + "/BMN_checkpoint.pth.tar")
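    # model.best_loss is assumed to be initialised elsewhere (e.g. to float("inf"))
    # before the first call; epoch_loss here is the summed, not averaged, loss.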
    if epoch_loss < model.best_loss:
        model.best_loss = epoch_loss
        torch.save(state, opt["checkpoint_path"] + "/BMN_best.pth.tar")
Example #2
def train_BMN(data_loader, model, optimizer, epoch, writer, opt):
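    """Train BMN for one epoch: optimise the joint TEM + PEM loss and log
    the per-iteration averages to TensorBoard."""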
    model.train()
    epoch_pem_loss = 0
    epoch_tem_loss = 0
    epoch_loss = 0
    for n_iter, (input_data, label_start, label_end, label_confidence) in enumerate(data_loader):
        input_data = input_data.cuda()
        label_start = label_start.cuda()
        label_end = label_end.cuda()
        label_confidence = label_confidence.cuda()

        start_end, confidence_map = model(input_data)
        tem_loss = TEM_loss_function(label_start, label_end, start_end, opt)
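        # confidence_mask is assumed to be a precomputed, module-level BM mask
        # (see the setup sketch under Example #1); it is not a parameter here.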
        pem_loss = PEM_loss_function(label_confidence, confidence_map, confidence_mask, opt)
        loss = tem_loss + pem_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        epoch_pem_loss += pem_loss.cpu().detach().numpy()
        epoch_tem_loss += tem_loss.cpu().detach().numpy()
        epoch_loss += loss.cpu().detach().numpy()

    writer.add_scalars('data/pem_loss', {'train': epoch_pem_loss / (n_iter + 1)}, epoch)
    writer.add_scalars('data/tem_loss', {'train': epoch_tem_loss / (n_iter + 1)}, epoch)
    writer.add_scalars('data/total_loss', {'train': epoch_loss / (n_iter + 1)}, epoch)

    print("BMN training loss(epoch %d): tem_loss: %.03f, pem_loss: %.03f, total_loss: %.03f" % (
        epoch, epoch_tem_loss / (n_iter + 1),
        epoch_pem_loss / (n_iter + 1),
        epoch_loss / (n_iter + 1)))
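The two functions are meant to be called once per epoch from a small driver. A minimal sketch follows, assuming a BMN model class, a VideoDataSet dataset, and TensorBoard's SummaryWriter; none of these names, nor the opt keys used here, appear in the snippets above.

import torch
from torch.utils.tensorboard import SummaryWriter

def main(opt):
    model = BMN(opt).cuda()                       # assumed model class
    model.best_loss = float("inf")                # consumed by test_BMN's checkpointing
    optimizer = torch.optim.Adam(model.parameters(), lr=opt["learning_rate"],
                                 weight_decay=opt["weight_decay"])
    train_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="train"),
                                               batch_size=opt["batch_size"], shuffle=True)
    test_loader = torch.utils.data.DataLoader(VideoDataSet(opt, subset="validation"),
                                              batch_size=opt["batch_size"], shuffle=False)
    writer = SummaryWriter()
    for epoch in range(opt["epochs"]):
        train_BMN(train_loader, model, optimizer, epoch, writer, opt)
        test_BMN(test_loader, model, epoch, writer, opt)
    writer.close()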