Example #1

import torch
import torch.nn as nn
from time import gmtime, strftime
from tqdm import tqdm

# `bot`, `config`, and `utility` are project-local helper modules referenced below.
import bot
import config
import utility

# Assumed device setup; the original snippet defines `device` elsewhere.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def valid(model, testing_loader, loss_fn):
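    """Evaluate `model` on `testing_loader` using `loss_fn`.

    Returns the epoch loss and accuracy plus flat per-example lists of
    actual labels, predicted labels, and predicted class probabilities.
    """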
    model.eval()
    n_correct = 0
    tr_loss = 0
    nb_tr_steps = 0
    nb_tr_examples = 0
    y_test_predicted = []
    y_test_predicted_prob_list = []
    y_test_actual = []
    softmax = torch.nn.Softmax(dim=1)  # turns class logits into probabilities
    with torch.no_grad():
        for step, data in enumerate(tqdm(testing_loader)):
            ids = data['ids'].to(device, dtype=torch.long)
            mask = data['mask'].to(device, dtype=torch.long)
            targets = data['targets'].to(device, dtype=torch.long)
            outputs = model(ids, mask)
            # print("OUTPUTS: {}".format(outputs))
            # print("targets: {}".format(targets))
            loss = loss_fn(outputs, targets)
            tr_loss += loss.item()
            _, big_idx = torch.max(outputs, dim=1)  # predicted class per example
            y_test_predicted_prob = softmax(outputs)
            # extend (not append) keeps the list flat: one probability row per
            # example, consistent with y_test_predicted and y_test_actual below
            y_test_predicted_prob_list.extend(y_test_predicted_prob.tolist())

            n_correct += utility.calculate_accuracy(big_idx, targets)
            # print("y_test_predicted: {}".format(big_idx))
            # print("type(y_test_predicted) : {}".format(type(big_idx)))
            # print("y_test_actual: {}".format(targets))
            # print("type(y_test_actual) : {}".format(type(targets)))
            y_test_predicted.extend(big_idx.tolist())
            y_test_actual.extend(targets.tolist())
            nb_tr_steps += 1
            nb_tr_examples += targets.size(0)

            if step % 5000 == 0:
                loss_step = tr_loss / nb_tr_steps
                accu_step = (n_correct * 100) / nb_tr_examples
                print(f"Validation Loss per 5000 steps: {loss_step}")
                print(f"Validation Accuracy per 5000 steps: {accu_step}")
    epoch_loss = tr_loss / nb_tr_steps
    epoch_accu = (n_correct * 100) / nb_tr_examples
    print(f"Validation Loss Epoch: {epoch_loss}")
    print(f"Validation Accuracy Epoch: {epoch_accu}")

    return epoch_loss, epoch_accu, y_test_actual, y_test_predicted, y_test_predicted_prob_list
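
# A minimal usage sketch for `valid` (hypothetical wiring: `test_loader` and
# the CrossEntropyLoss choice are assumptions, the original snippet does not
# show how the function is called):
#
#     loss_fn = torch.nn.CrossEntropyLoss()
#     epoch_loss, epoch_accu, y_true, y_pred, y_prob = valid(
#         model, test_loader, loss_fn)
#     # y_true/y_pred are flat per-example lists, ready for, e.g.,
#     # sklearn.metrics.classification_report(y_true, y_pred)
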
def retrain(model, training_loader, loss_fn, optimizer, epoch):
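    """Run one re-training epoch `epoch` over `training_loader`.

    Returns the epoch loss and accuracy; progress is also pushed to
    Telegram every 5000 steps via the project's `bot` helper.
    """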
    tr_loss = 0
    n_correct = 0
    nb_tr_steps = 0
    nb_tr_examples = 0
    model.train()
    bot.telegram_bot_sendtext("re-training started for : " +
                              config.generic_path)
    for step, data in enumerate(tqdm(training_loader)):
        ids = data['ids'].to(device, dtype=torch.long)
        mask = data['mask'].to(device, dtype=torch.long)
        targets = data['targets'].to(device, dtype=torch.long)

        # print("len(targets): ".format(len(targets)))
        # print("len(ids): ".format(len(targets)))
        # print("len(ids[0]): ".format(len(ids[0])))

        # print("*"*120)
        # print("ids: {}".format(ids))
        # print("mask: {}".format(mask))
        # print("targets: {}".format(targets))
        # print("*"*120)

        # Calling the created model
        # outputs, probability = model(ids, mask)
        outputs = model(ids, mask)
        # print("MODEL OUTPUTS: {}".format(outputs))
        # print("MODEL probability: {}".format(probability))
        loss = loss_fn(outputs, targets)
        # print("loss: {}".format(loss))
        tr_loss += loss.item()
        # print("loss.item(): {}".format(loss.item()))
        # print("outputs.data: {}".format(outputs.data))
        # print("torch.max(outputs.data, dim=1): {}".format(torch.max(outputs.data, dim=1)))
        big_val, big_idx = torch.max(outputs.data, dim=1)
        # print("big_idx: {}".format(big_idx))
        n_correct += utility.calculate_accuracy(big_idx, targets)
        # print("+"*120)

        nb_tr_steps += 1
        nb_tr_examples += targets.size(0)

        if step % 5000 == 0:
            loss_step = tr_loss / nb_tr_steps
            accu_step = (n_correct * 100) / nb_tr_examples
            bot.telegram_bot_sendtext(config.generic_path)
            bot.telegram_bot_sendtext("Training Loss per 5000 steps: " +
                                      str(loss_step))
            bot.telegram_bot_sendtext("Training Accuracy per 5000 steps: " +
                                      str(accu_step))
            print(f"Training Loss per 5000 steps: {loss_step}")
            print(f"Training Accuracy per 5000 steps: {accu_step}")
            print(strftime("%Y-%m-%d %H:%M:%S", gmtime()))

        optimizer.zero_grad()
        loss.backward()
        # Clip gradients to stabilise training before the optimizer step
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()

    print(
        f'The Total Accuracy for Epoch {epoch}: {(n_correct * 100) / nb_tr_examples}'
    )
    epoch_loss = tr_loss / nb_tr_steps
    epoch_accu = (n_correct * 100) / nb_tr_examples

    bot.telegram_bot_sendtext(config.generic_path)
    bot.telegram_bot_sendtext("Final Training Loss Epoch " + str(epoch_loss))
    bot.telegram_bot_sendtext("Final Training Accuracy Epoch: " +
                              str(epoch_accu))
    bot.telegram_bot_sendtext("EPOCH completed {Re-training}")
    print(f"Training Loss Epoch: {epoch_loss}")
    print(f"Training Accuracy Epoch: {epoch_accu}")

    return epoch_loss, epoch_accu
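
# A minimal driver sketch (hypothetical wiring: `EPOCHS`, `train_loader`,
# `test_loader`, and the Adam learning rate are assumptions, the original
# snippet does not show the outer loop):
#
#     loss_fn = torch.nn.CrossEntropyLoss()
#     optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
#     for epoch in range(EPOCHS):
#         train_loss, train_accu = retrain(
#             model, train_loader, loss_fn, optimizer, epoch)
#         val_loss, val_accu, y_true, y_pred, y_prob = valid(
#             model, test_loader, loss_fn)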