Example 1
def validate(model, dataloader, criterion):
    """
    Compute the loss and accuracy of a model on some validation dataset.

    Args:
        model: A torch module for which the loss and accuracy must be
            computed.
        dataloader: A dict of pre-batched validation data with 'premises',
            'hypotheses' and 'labels' keys.
        criterion: A loss criterion to use for computing the loss.

    Returns:
        epoch_time: The total time to compute the loss and accuracy on the
            entire validation set.
        epoch_loss: The loss computed on the entire validation set.
        epoch_accuracy: The accuracy computed on the entire validation set.
    """

    # Switch to evaluate mode.
    model.eval()
    device = model.device

    epoch_start = time.time()
    running_loss = 0.0
    running_accuracy = 0.0
    total_num = 0

    bc = BertClient(check_length=False)
    # Note: here `dataloader` is a dict of pre-batched lists, not a
    # DataLoader; `batch` aliases it for indexed access below.
    batch = dataloader
    # Deactivate autograd for evaluation.
    with torch.no_grad():
        for batch_index in range(len(dataloader['labels'])):
            # Move input and output data to the GPU if one is used.
            premises = torch.tensor(bc.encode(
                batch["premises"][batch_index])).to(device)
            hypotheses = torch.tensor(
                bc.encode(batch["hypotheses"][batch_index])).to(device)
            labels = torch.tensor(batch["labels"][batch_index]).to(device)

            logits, probs, _ = model(premises, hypotheses)

            loss = criterion(logits, labels)

            running_loss += loss.item()
            running_accuracy += correct_predictions(probs, labels)
            total_num += len(labels)

    epoch_time = time.time() - epoch_start
    epoch_loss = running_loss / len(dataloader['labels'])
    epoch_accuracy = running_accuracy / total_num

    return epoch_time, epoch_loss, epoch_accuracy
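
All of the examples on this page call a correct_predictions helper that is
not shown. A minimal sketch consistent with how it is used here (counting
correct argmax predictions in a batch); this is an assumed implementation,
not the original:

import torch

def correct_predictions(output_probabilities, targets):
    # Assumed helper, inferred from usage above: count how many argmax
    # predictions in `output_probabilities` match the gold `targets`.
    _, out_classes = output_probabilities.max(dim=1)
    return (out_classes == targets).sum().item()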
Example 2
def test(model, dataloader):
    """
    Test the accuracy of a model on some labelled test dataset.

    Args:
        model: The torch module on which testing must be performed.
        dataloader: A DataLoader object to iterate over some dataset.

    Returns:
        batch_time: The average time to predict the classes of a batch.
        total_time: The total time to process the whole dataset.
        accuracy: The accuracy of the model on the input data.
        accuracy_score, precision_score, recall_score, f1_score: The same
            metrics recomputed with scikit-learn on the collected
            predictions (precision, recall and F1 use scikit-learn's
            binary default, so two classes are assumed).
    """
    # Switch the model to eval mode.
    model.eval()
    device = model.device

    time_start = time.time()
    batch_time = 0.0
    accuracy = 0.0

    all_labels = []
    all_out_classes = []

    # Deactivate autograd for evaluation.
    with torch.no_grad():
        for batch in dataloader:
            batch_start = time.time()

            # Move input and output data to the GPU if one is used.
            premises = batch["premise"].to(device)
            premises_lengths = batch["premise_length"].to(device)
            hypotheses = batch["hypothesis"].to(device)
            hypotheses_lengths = batch["hypothesis_length"].to(device)
            labels = batch["label"]
            all_labels.extend(labels.tolist())
            labels = labels.to(device)

            _, probs = model(premises,
                             premises_lengths,
                             hypotheses,
                             hypotheses_lengths)
            _, out_classes = probs.max(dim=1)
            all_out_classes.extend(out_classes.tolist())

            accuracy += correct_predictions(probs, labels)
            batch_time += time.time() - batch_start

    batch_time /= len(dataloader)
    total_time = time.time() - time_start
    accuracy /= len(dataloader.dataset)
    accuracy_score = metrics.accuracy_score(all_labels, all_out_classes)
    precision_score = metrics.precision_score(all_labels, all_out_classes)
    recall_score = metrics.recall_score(all_labels, all_out_classes)
    f1_score = metrics.f1_score(all_labels, all_out_classes)

    return (batch_time, total_time, accuracy, accuracy_score,
            precision_score, recall_score, f1_score)
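
For reference, a self-contained illustration of the scikit-learn metrics
used above, with made-up binary labels (precision, recall and F1 assume two
classes here, as in test):

from sklearn import metrics

all_labels = [1, 0, 1, 1, 0]       # illustrative gold labels
all_out_classes = [1, 0, 0, 1, 0]  # illustrative predictions
print(metrics.accuracy_score(all_labels, all_out_classes))   # 0.8
print(metrics.precision_score(all_labels, all_out_classes))  # 1.0
print(metrics.recall_score(all_labels, all_out_classes))     # 0.666...
print(metrics.f1_score(all_labels, all_out_classes))         # 0.8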
Example 3
def validate(model, dataloader, criterion):
    """
    Compute the loss and accuracy of a model on some validation dataset.

    Args:
        model: A torch module for which the loss and accuracy must be
            computed.
        dataloader: A DataLoader object to iterate over the validation data.
        criterion: A loss criterion to use for computing the loss.

    Returns:
        epoch_time: The total time to compute the loss and accuracy on the
            entire validation set.
        epoch_loss: The loss computed on the entire validation set.
        epoch_accuracy: The accuracy computed on the entire validation set.
    """
    # Switch to evaluate mode.
    model.eval()
    device = model.device

    epoch_start = time.time()
    running_loss = 0.0
    running_accuracy = 0.0

    # Deactivate autograd for evaluation.
    with torch.no_grad():
        for batch in dataloader:
            # Move input and output data to the GPU if one is used.
            premises = batch["premise"].to(device)
            premises_lengths = batch["premise_length"].to(device)
            hypotheses = batch["hypothesis"].to(device)
            hypotheses_lengths = batch["hypothesis_length"].to(device)
            labels = batch["label"].to(device)

            logits, probs, _, _ = model(premises,
                                        premises_lengths,
                                        hypotheses,
                                        hypotheses_lengths)
            loss = criterion(logits, labels)

            running_loss += loss.item()
            running_accuracy += correct_predictions(probs, labels)

    epoch_time = time.time() - epoch_start
    epoch_loss = running_loss / len(dataloader)
    epoch_accuracy = running_accuracy / len(dataloader.dataset)

    return epoch_time, epoch_loss, epoch_accuracy
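
A hedged sketch of how this validate function is typically wired into a
training run; model, valid_loader, criterion and max_epochs are placeholder
names, not taken from the original code:

import torch

best_accuracy = 0.0
for epoch in range(1, max_epochs + 1):  # max_epochs: hypothetical setting
    epoch_time, val_loss, val_acc = validate(model, valid_loader, criterion)
    print("epoch {}: loss={:.4f}, acc={:.4f} ({:.1f}s)"
          .format(epoch, val_loss, val_acc, epoch_time))
    if val_acc > best_accuracy:  # keep only the best checkpoint
        best_accuracy = val_acc
        torch.save(model.state_dict(), "best_model.pt")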
Example 4
def train_loss(model, dataloader, optimizer, criterion, epoch_number,
               max_gradient_norm):
    """
    Train a model for one epoch on some input data with a given optimizer and
    criterion.

    Args:
        model: A pair (list or tuple) of torch modules to train: model[0]
            is the ESIM-style encoder and model[1] the classifier trained
            on its outputs.
        dataloader: A dict of pre-batched training data with 'premises',
            'hypotheses' and 'labels' keys.
        optimizer: A torch optimizer to use for training on the input model.
        criterion: A loss criterion to use for training.
        epoch_number: The number of the epoch for which training is performed.
        max_gradient_norm: Max. norm for gradient norm clipping.

    Returns:
        loss_list: The list of per-batch training losses recorded over the
            epoch.
    """
    # Switch the model to train mode.
    model[0].train()
    model[1].train()
    device = model[0].device

    epoch_start = time.time()
    batch_time_avg = 0.0
    running_loss = 0.0
    correct_preds = 0
    total_num = 0
    loss_list = []

    bc = BertClient(check_length=False)
    # Note: here `dataloader` is a dict of pre-batched lists, not a
    # DataLoader; `batch` aliases it for indexed access below.
    batch = dataloader
    tqdm_batch_iterator = tqdm(range(len(dataloader['labels'])))
    for batch_index in tqdm_batch_iterator:
        batch_start = time.time()

        # Move input and output data to the GPU if it is used.
        premises = torch.tensor(bc.encode(
            batch["premises"][batch_index])).to(device)
        hypotheses = torch.tensor(bc.encode(
            batch["hypotheses"][batch_index])).to(device)
        labels = torch.tensor(batch["labels"][batch_index]).to(device)

        _, probabilities, esim_logits = model[0](premises, hypotheses)
        preds = torch.argmax(probabilities, dim=1)
        premises_adv, hypotheses_adv = fgsm(premises,
                                            hypotheses,
                                            preds,
                                            model[0],
                                            criterion,
                                            eps=2e-2)
        _, _, adv_logits = model[0](premises_adv, hypotheses_adv)
        vulnerability = torch.cat(
            [esim_logits - adv_logits, esim_logits, adv_logits], dim=1)

        logits, probs = model[1](premises, hypotheses, vulnerability)
        optimizer.zero_grad()
        loss = criterion(logits, labels)
        loss.backward()

        # Gradient clipping is applied to the second model only.
        nn.utils.clip_grad_norm_(model[1].parameters(), max_gradient_norm)
        optimizer.step()

        batch_time_avg += time.time() - batch_start
        running_loss += loss.item()
        loss_list.append(loss.item())
        correct_preds += correct_predictions(probs, labels)
        total_num += len(labels)

        description = "Avg. batch proc. time: {:.4f}s, loss: {:.4f}"\
                      .format(batch_time_avg/(batch_index+1),
                              running_loss/(batch_index+1))
        tqdm_batch_iterator.set_description(description)

    return loss_list
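
The fgsm attack called above is not shown. Below is a minimal sketch of a
fast-gradient-sign-method perturbation applied directly to the dense BERT
encodings, assuming the model returns (logits, probs, extra) as in the
other examples; this is an assumed implementation, not the original helper:

import torch

def fgsm(premises, hypotheses, targets, model, criterion, eps=2e-2):
    # Assumed FGSM in embedding space: take one step of size eps in the
    # gradient-sign direction that increases the loss on `targets`.
    premises = premises.clone().detach().requires_grad_(True)
    hypotheses = hypotheses.clone().detach().requires_grad_(True)
    logits, _, _ = model(premises, hypotheses)
    loss = criterion(logits, targets)
    loss.backward()
    premises_adv = premises + eps * premises.grad.sign()
    hypotheses_adv = hypotheses + eps * hypotheses.grad.sign()
    return premises_adv.detach(), hypotheses_adv.detach()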
Example 5
def validate(model, tokenizer, dataloader):
    """
    Compute the loss and accuracy of a model on some validation dataset.

    Args:
        model: A torch module for which the loss and accuracy must be
            computed.
        tokenizer: The tokenizer used to encode the premise/hypothesis
            pairs.
        dataloader: A dict of pre-batched validation data with 'premises',
            'hypotheses' and 'labels' keys.

    Returns:
        epoch_time: The total time to compute the loss and accuracy on the
            entire validation set.
        epoch_loss: The loss computed on the entire validation set.
        epoch_accuracy: The accuracy computed on the entire validation set.
    """

    # Switch to evaluate mode.
    model.eval()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    epoch_start = time.time()
    running_loss = 0.0
    running_accuracy = 0.0
    total_num = 0

    # Note: here `dataloader` is a dict of pre-batched lists, not a
    # DataLoader; `batch` aliases it for indexed access below.
    batch = dataloader
    # Deactivate autograd for evaluation.
    with torch.no_grad():
        for batch_index in range(len(dataloader['labels'])):
            input_ids = []
            max_length = 0
            pad_token = 0
            labels = torch.tensor(batch["labels"][batch_index]).to(device)
            for i in range(len(labels)):
                inputs = tokenizer.encode(batch["premises"][batch_index][i],
                                          batch["hypotheses"][batch_index],
                                          add_special_tokens=True,
                                          max_length=128)
                input_ids.append(inputs)
                max_length = max(max_length, len(inputs))
            input_ids_new = []
            # Pad on the right up to the longest sequence in the batch.
            for inputs in input_ids:
                padding_length = max_length - len(inputs)
                inputs = inputs + ([pad_token] * padding_length)
                input_ids_new.append(inputs)

            input_ids = torch.tensor(input_ids_new).to(device)
            outputs = model(input_ids, labels=labels)
            loss, logits = outputs[:2]

            running_loss += loss.item()
            running_accuracy += correct_predictions(F.softmax(logits, dim=-1),
                                                    labels)
            total_num += len(labels)

    epoch_time = time.time() - epoch_start
    epoch_loss = running_loss / len(dataloader['labels'])
    epoch_accuracy = running_accuracy / total_num

    return epoch_time, epoch_loss, epoch_accuracy
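
In recent versions of Hugging Face transformers, the manual encode-and-pad
loop above can be replaced by the tokenizer's batched call; a hedged
drop-in sketch for the body of the batch loop (assuming a tokenizer version
that supports being called with padding options):

encoded = tokenizer(batch["premises"][batch_index],
                    batch["hypotheses"][batch_index],
                    add_special_tokens=True,
                    max_length=128,
                    truncation=True,
                    padding=True,
                    return_tensors="pt")
input_ids = encoded["input_ids"].to(device)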
Example 6
def train(model, tokenizer, dataloader, optimizer, scheduler, max_grad_norm):
    """
    Train a model for one epoch on some input data with a given optimizer and
    criterion.

    Args:
        model: A torch module that must be trained on some input data.
        tokenizer: The tokenizer used to encode the premise/hypothesis
            pairs.
        dataloader: A dict of pre-batched training data with 'premises',
            'hypotheses' and 'labels' keys.
        optimizer: A torch optimizer to use for training on the input model.
        scheduler: A learning-rate scheduler stepped once per batch.
        max_grad_norm: Max. norm for gradient norm clipping.

    Returns:
        epoch_time: The total time necessary to train the epoch.
        epoch_loss: The training loss computed for the epoch.
        epoch_accuracy: The accuracy computed for the epoch.
    """
    # Switch the model to train mode.
    model.train()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    epoch_start = time.time()
    batch_time_avg = 0.0
    running_loss = 0.0
    correct_preds = 0
    total_num = 0

    # Note: here `dataloader` is a dict of pre-batched lists, not a
    # DataLoader; `batch` aliases it for indexed access below.
    batch = dataloader
    tqdm_batch_iterator = tqdm(range(len(dataloader['labels'])))
    for batch_index in tqdm_batch_iterator:
        batch_start = time.time()

        input_ids = []
        max_length = 0
        pad_token = 0
        labels = torch.tensor(batch["labels"][batch_index]).to(device)
        for i in range(len(labels)):
            inputs = tokenizer.encode(batch["premises"][batch_index][i],
                                      batch["hypotheses"][batch_index],
                                      add_special_tokens=True,
                                      max_length=128)
            input_ids.append(inputs)
            max_length = max(max_length, len(inputs))
        input_ids_new = []
        # Pad on the right up to the longest sequence in the batch.
        for inputs in input_ids:
            padding_length = max_length - len(inputs)
            inputs = inputs + ([pad_token] * padding_length)
            input_ids_new.append(inputs)

        input_ids = torch.tensor(input_ids_new).to(device)
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]

        optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        optimizer.step()
        scheduler.step()

        batch_time_avg += time.time() - batch_start
        running_loss += loss.item()
        correct_preds += correct_predictions(F.softmax(logits, dim=-1), labels)
        total_num += len(labels)

        description = "Avg. batch proc. time: {:.4f}s, loss: {:.4f}"\
                      .format(batch_time_avg/(batch_index+1),
                              running_loss/(batch_index+1))
        tqdm_batch_iterator.set_description(description)

    epoch_time = time.time() - epoch_start
    epoch_loss = running_loss / len(dataloader['labels'])
    epoch_accuracy = correct_preds / total_num

    return epoch_time, epoch_loss, epoch_accuracy
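
A hedged sketch of the optimizer/scheduler setup this loop expects (the
scheduler is stepped once per batch); the learning rate, warmup and
num_epochs values are illustrative, not from the original:

from transformers import AdamW, get_linear_schedule_with_warmup
# Newer transformers versions may prefer torch.optim.AdamW instead.

num_epochs = 3  # illustrative
num_training_steps = len(dataloader["labels"]) * num_epochs
optimizer = AdamW(model.parameters(), lr=2e-5)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=0, num_training_steps=num_training_steps)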
Example 7
def train(model, dataloader, optimizer, criterion, epoch_number,
          max_gradient_norm):
    """
    Train a model for one epoch on some input data with a given optimizer and
    criterion.

    Args:
        model: A torch module that must be trained on some input data.
        dataloader: A dict of pre-batched training data with 'premises',
            'hypotheses' and 'labels' keys.
        optimizer: A torch optimizer to use for training on the input model.
        criterion: A loss criterion to use for training.
        epoch_number: The number of the epoch for which training is performed.
        max_gradient_norm: Max. norm for gradient norm clipping.

    Returns:
        epoch_time: The total time necessary to train the epoch.
        epoch_loss: The training loss computed for the epoch.
        epoch_accuracy: The accuracy computed for the epoch.
    """
    # Switch the model to train mode.
    model.train()
    device = model.device

    epoch_start = time.time()
    batch_time_avg = 0.0
    running_loss = 0.0
    correct_preds = 0
    total_num = 0

    bc = BertClient(check_length=False)
    # Note: here `dataloader` is a dict of pre-batched lists, not a
    # DataLoader; `batch` aliases it for indexed access below.
    batch = dataloader
    tqdm_batch_iterator = tqdm(range(len(dataloader['labels'])))
    for batch_index in tqdm_batch_iterator:
        batch_start = time.time()

        # Move input and output data to the GPU if it is used.
        premises = torch.tensor(bc.encode(
            batch["premises"][batch_index])).to(device)
        hypotheses = torch.tensor(bc.encode(
            batch["hypotheses"][batch_index])).to(device)
        labels = torch.tensor(batch["labels"][batch_index]).to(device)

        optimizer.zero_grad()

        logits, probs, _ = model(premises, hypotheses)
        loss = criterion(logits, labels)
        loss.backward()

        nn.utils.clip_grad_norm_(model.parameters(), max_gradient_norm)
        optimizer.step()

        batch_time_avg += time.time() - batch_start
        running_loss += loss.item()
        correct_preds += correct_predictions(probs, labels)
        total_num += len(labels)

        description = "Avg. batch proc. time: {:.4f}s, loss: {:.4f}"\
                      .format(batch_time_avg/(batch_index+1),
                              running_loss/(batch_index+1))
        tqdm_batch_iterator.set_description(description)

    epoch_time = time.time() - epoch_start
    epoch_loss = running_loss / len(dataloader['labels'])
    epoch_accuracy = correct_preds / total_num

    return epoch_time, epoch_loss, epoch_accuracy
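
BertClient comes from the bert-serving package and needs a running
bert-serving-server; a hedged setup sketch (the model path and worker count
are placeholders):

# Server side (shell), assumed invocation:
#   bert-serving-start -model_dir /path/to/uncased_L-12_H-768_A-12 -num_worker=1
from bert_serving.client import BertClient

bc = BertClient(check_length=False)
# encode() takes a list of strings and returns a numpy array of sentence
# vectors, e.g. shape (2, 768) for BERT-base here.
vectors = bc.encode(["a premise sentence", "a hypothesis sentence"])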
Example 8
def validate(model, dataloader):
    """
    Compute the accuracy and adversarial-loss statistics of a model on some
    validation dataset.

    Args:
        model: A torch module for which the accuracy and adversarial-loss
            statistics must be computed.
        dataloader: A DataLoader object to iterate over the validation data.

    Returns:
        epoch_time: The total time to compute the statistics on the entire
            validation set.
        epoch_accuracy: The accuracy computed on the entire validation set.
    """
    criterion = nn.CrossEntropyLoss(reduction='none')
    criterion_all = nn.CrossEntropyLoss()

    # Note: the model is kept in train mode and autograd stays enabled,
    # because the FGSM attack below needs gradients w.r.t. the inputs.
    adv_loss_pos, adv_loss_neg = None, None

    model.train()
    device = model.device

    epoch_start = time.time()
    running_loss = 0.0
    running_accuracy = 0.0
    total_num = 0

    # No torch.no_grad() here: fgsm_esim requires gradient computation.
    for batch_index, batch in enumerate(dataloader):
        premises = batch["premise"].to(device)
        premises_lengths = batch["premise_length"].to(device)
        hypotheses = batch["hypothesis"].to(device)
        hypotheses_lengths = batch["hypothesis_length"].to(device)
        labels = batch["label"].to(device)

        logits, probs, _, embed = model(premises, premises_lengths, hypotheses,
                                        hypotheses_lengths)
        preds = torch.argmax(probs, dim=1)

        premises_adv, hypotheses_adv = fgsm_esim(embed[0],
                                                 embed[1],
                                                 preds,
                                                 model,
                                                 criterion_all,
                                                 premises_lengths,
                                                 hypotheses_lengths,
                                                 embed[2],
                                                 embed[3],
                                                 eps=2e-2,
                                                 if_infnity=False)
        logits_adv, _, _, _ = model(premises_adv, premises_lengths,
                                    hypotheses_adv, hypotheses_lengths,
                                    True, embed[2], embed[3])

        loss = criterion(logits, labels)
        running_loss += loss.sum().item()
        total_num += len(labels)

        np_labels = labels.cpu().numpy()

        running_accuracy += correct_predictions(probs, labels)
        adv_loss = ShannonEntropy(logits_adv, probs)
        np_adv_loss = adv_loss.detach().cpu().numpy()

        if batch_index == 0:
            adv_loss_pos = np_adv_loss[np_labels == 1]
            adv_loss_neg = np_adv_loss[np_labels == 0]
        else:
            adv_loss_pos = np.concatenate(
                (adv_loss_pos, np_adv_loss[(np_labels == 1)]), axis=0)
            adv_loss_neg = np.concatenate(
                (adv_loss_neg, np_adv_loss[(np_labels == 0)]), axis=0)

    epoch_time = time.time() - epoch_start
    epoch_accuracy = running_accuracy / len(dataloader.dataset)
    print(np.mean(adv_loss_pos), np.mean(adv_loss_neg))

    losses = np.concatenate((adv_loss_pos, adv_loss_neg), axis=0)
    labels = np.concatenate(
        (np.ones_like(adv_loss_pos), np.zeros_like(adv_loss_neg)), axis=0)
    auc_score = roc_auc(labels, losses)
    creterion_func(adv_loss_pos, adv_loss_neg, loss_num_respectively=400)
    print('[ROC_AUC] score: %.2f%%' % (100. * auc_score))

    return epoch_time, epoch_accuracy
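
ShannonEntropy is another helper that is not shown. One plausible
per-sample implementation, inferred from its use above as a per-example
vulnerability score (a cross-entropy-style measure between the clean
probabilities and the softmax of the adversarial logits); the original may
differ:

import torch.nn.functional as F

def ShannonEntropy(logits, probs):
    # Assumed implementation: per-sample cross entropy between `probs`
    # and softmax(`logits`), returning one score per example so the
    # result can be split by label as above.
    log_q = F.log_softmax(logits, dim=-1)
    return -(probs * log_q).sum(dim=-1)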
Example 9
def validate(model, dataloader):
    """
    Compute the accuracy and adversarial-loss statistics of a model on some
    validation dataset.

    Args:
        model: A torch module for which the accuracy and adversarial-loss
            statistics must be computed.
        dataloader: A dict of pre-batched validation data with 'premises',
            'hypotheses' and 'labels' keys.

    Returns:
        epoch_time: The total time to compute the statistics on the entire
            validation set.
        epoch_accuracy: The accuracy computed on the entire validation set.
    """
    criterion = nn.CrossEntropyLoss(reduction='none')
    criterion_all = nn.CrossEntropyLoss()

    # Note: the model is kept in train mode and autograd stays enabled,
    # because the FGSM attack below needs gradients w.r.t. the inputs.
    running_loss_pos, running_loss_neg = 0.0, 0.0
    adv_loss_pos, adv_loss_neg = None, None

    model.train()
    device = model.device

    epoch_start = time.time()
    running_loss = 0.0
    running_accuracy = 0.0
    total_num = 0

    bc = BertClient(check_length=False)
    # Note: here `dataloader` is a dict of pre-batched lists, not a
    # DataLoader; `batch` aliases it for indexed access below.
    batch = dataloader
    # No torch.no_grad() here: the fgsm call below requires gradients.
    for batch_index in range(len(dataloader['labels'])):
        # Move input and output data to the GPU if one is used.
        premises = torch.tensor(bc.encode(
            batch["premises"][batch_index])).to(device)
        hypotheses = torch.tensor(bc.encode(
            batch["hypotheses"][batch_index])).to(device)
        labels = torch.tensor(batch["labels"][batch_index]).to(device)

        logits, probs, _ = model(premises, hypotheses)
        pred = torch.argmax(logits, dim=1)

        loss = criterion(logits, labels)

        running_loss += loss.sum().item()
        total_num += len(labels)

        np_labels = labels.cpu().numpy()

        # Adversarial pass.
        premises_adv, hypotheses_adv = fgsm(premises, hypotheses, pred,
                                            model, criterion_all, eps=1e-1)
        logits_adv, _, _ = model(premises_adv, hypotheses_adv)

        running_accuracy += correct_predictions(probs, labels)

        adv_loss = criterion(logits_adv, pred)
        np_adv_loss = adv_loss.detach().cpu().numpy()

        running_loss_pos += np_adv_loss[np_labels == 1].sum()
        running_loss_neg += np_adv_loss[np_labels == 0].sum()

        if batch_index == 0:
            adv_loss_pos = np_adv_loss[np_labels == 1]
            adv_loss_neg = np_adv_loss[np_labels == 0]
        else:
            adv_loss_pos = np.concatenate(
                (adv_loss_pos, np_adv_loss[np_labels == 1]), axis=0)
            adv_loss_neg = np.concatenate(
                (adv_loss_neg, np_adv_loss[np_labels == 0]), axis=0)

    epoch_time = time.time() - epoch_start
    epoch_accuracy = running_accuracy / total_num
    print(running_loss_pos, running_loss_neg)

    losses = np.concatenate((adv_loss_pos, adv_loss_neg), axis=0)
    labels = np.concatenate(
        (np.ones_like(adv_loss_pos), np.zeros_like(adv_loss_neg)), axis=0)
    auc_score = roc_auc(labels, losses)
    creterion_func(adv_loss_pos, adv_loss_neg)
    print('[ROC_AUC] score: %.2f%%' % (100. * auc_score))

    return epoch_time, epoch_accuracy
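
roc_auc and creterion_func are external helpers; the former is presumably a
thin wrapper over scikit-learn, sketched below under that assumption:

from sklearn import metrics

def roc_auc(labels, scores):
    # Assumed wrapper: ROC-AUC of the per-example `scores` against the
    # binary `labels`.
    return metrics.roc_auc_score(labels, scores)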
Example 10
def train(model, dataloader, optimizer, criterion, epoch_number,
          max_gradient_norm):
    """
    Train a model for one epoch on some input data with a given optimizer and
    criterion.

    Args:
        model: A pair (list or tuple) of torch modules to train: model[0]
            is the ESIM-style encoder and model[1] the classifier trained
            on its outputs.
        dataloader: A DataLoader object to iterate over the training data.
        optimizer: A torch optimizer to use for training on the input model.
        criterion: A loss criterion to use for training.
        epoch_number: The number of the epoch for which training is performed.
        max_gradient_norm: Max. norm for gradient norm clipping.

    Returns:
        epoch_time: The total time necessary to train the epoch.
        epoch_loss: The training loss computed for the epoch.
        epoch_accuracy: The accuracy computed for the epoch.
    """
    # Switch the model to train mode.
    model[0].train()
    model[1].train()
    device = model[0].device

    epoch_start = time.time()
    batch_time_avg = 0.0
    running_loss = 0.0
    correct_preds = 0

    tqdm_batch_iterator = tqdm(dataloader)
    for batch_index, batch in enumerate(tqdm_batch_iterator):
        batch_start = time.time()

        # Move input and output data to the GPU if it is used.
        premises = batch["premise"].to(device)
        premises_lengths = batch["premise_length"].to(device)
        hypotheses = batch["hypothesis"].to(device)
        hypotheses_lengths = batch["hypothesis_length"].to(device)
        labels = batch["label"].to(device)

        _, probabilities, esim_logits, embed = model[0](premises,
                                                        premises_lengths,
                                                        hypotheses,
                                                        hypotheses_lengths)
        preds = torch.argmax(probabilities, dim=1)
        premises_adv, hypotheses_adv = fgsm_esim(embed[0],
                                                 embed[1],
                                                 preds,
                                                 model[0],
                                                 criterion,
                                                 premises_lengths,
                                                 hypotheses_lengths,
                                                 embed[2],
                                                 embed[3],
                                                 eps=2e-2,
                                                 if_infnity=False)
        _, _, adv_logits, _ = model[0](premises_adv, premises_lengths,
                                       hypotheses_adv, hypotheses_lengths,
                                       True, embed[2], embed[3])
        vulnerability = torch.cat(
            [esim_logits - adv_logits, esim_logits, adv_logits], dim=1)

        logits, probs = model[1](premises_adv, premises_lengths,
                                 hypotheses_adv, hypotheses_lengths,
                                 vulnerability, embed[2], embed[3])

        optimizer.zero_grad()
        loss = criterion(logits, labels)
        loss.backward()

        # Gradient clipping is applied to the second model only.
        nn.utils.clip_grad_norm_(model[1].parameters(), max_gradient_norm)
        optimizer.step()

        batch_time_avg += time.time() - batch_start
        running_loss += loss.item()
        correct_preds += correct_predictions(probs, labels)

        description = "Avg. batch proc. time: {:.4f}s, loss: {:.4f}"\
                      .format(batch_time_avg/(batch_index+1),
                              running_loss/(batch_index+1))
        tqdm_batch_iterator.set_description(description)

    epoch_time = time.time() - epoch_start
    epoch_loss = running_loss / len(dataloader)
    epoch_accuracy = correct_preds / len(dataloader.dataset)

    return epoch_time, epoch_loss, epoch_accuracy
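
For intuition, a self-contained illustration of the vulnerability feature
built above: for a 3-way NLI task it concatenates the clean/adversarial
logit difference with both logit vectors, giving 9 features per example
(the tensors below are random stand-ins):

import torch

esim_logits = torch.randn(4, 3)  # batch of 4, 3 classes (stand-in values)
adv_logits = torch.randn(4, 3)
vulnerability = torch.cat(
    [esim_logits - adv_logits, esim_logits, adv_logits], dim=1)
print(vulnerability.shape)  # torch.Size([4, 9])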
Example 11
def validate(model, dataloader, criterion):
    """
    Compute the loss and accuracy of a model on some validation dataset.

    Args:
        model: A pair (list or tuple) of torch modules: model[0] is the
            ESIM-style encoder and model[1] the classifier being
            validated.
        dataloader: A DataLoader object to iterate over the validation data.
        criterion: A loss criterion to use for computing the loss.

    Returns:
        epoch_time: The total time to compute the loss and accuracy on the
            entire validation set.
        epoch_loss: The loss computed on the entire validation set.
        epoch_accuracy: The accuracy computed on the entire validation set.
    """
    # model[1] is switched to eval mode; model[0] is kept in train mode,
    # mirroring how it is used during training, and autograd stays enabled
    # because fgsm_esim below needs gradients.
    model[0].train()
    model[1].eval()
    device = model[0].device

    epoch_start = time.time()
    running_loss = 0.0
    running_accuracy = 0.0

    # No torch.no_grad() here: fgsm_esim requires gradient computation.
    for batch in dataloader:
        # Move input and output data to the GPU if one is used.
        premises = batch["premise"].to(device)
        premises_lengths = batch["premise_length"].to(device)
        hypotheses = batch["hypothesis"].to(device)
        hypotheses_lengths = batch["hypothesis_length"].to(device)
        labels = batch["label"].to(device)

        _, probabilities, esim_logits, embed = model[0](premises,
                                                        premises_lengths,
                                                        hypotheses,
                                                        hypotheses_lengths)
        preds = torch.argmax(probabilities, dim=1)
        premises_adv, hypotheses_adv = fgsm_esim(embed[0], embed[1], preds,
                                                 model[0], criterion,
                                                 premises_lengths,
                                                 hypotheses_lengths, embed[2],
                                                 embed[3])
        _, _, adv_logits, _ = model[0](premises_adv, premises_lengths,
                                       hypotheses_adv, hypotheses_lengths,
                                       True, embed[2], embed[3])
        vulnerability = torch.cat(
            [esim_logits - adv_logits, esim_logits, adv_logits], dim=1)

        logits, probs = model[1](premises_adv, premises_lengths,
                                 hypotheses_adv, hypotheses_lengths,
                                 vulnerability, embed[2], embed[3])
        loss = criterion(logits, labels)

        running_loss += loss.item()
        running_accuracy += correct_predictions(probs, labels)

    epoch_time = time.time() - epoch_start
    epoch_loss = running_loss / len(dataloader)
    epoch_accuracy = running_accuracy / len(dataloader.dataset)

    return epoch_time, epoch_loss, epoch_accuracy