예제 #1
0
def validate(model, dataset, batch_size=1):
    """Run the model on one validation batch and color-print each prediction.

    Args:
        model: trained model, invoked as ``model(text)``.
        dataset: provides ``get_val_data(batch_size)`` and a ``val_dict``
            mapping key -> (raw text, labels).
        batch_size: number of validation samples to request.
    """
    model.eval()
    with torch.no_grad():
        keys, text, truth = dataset.get_val_data(batch_size=batch_size)
        predictions = model(text)

        for idx, sample_key in enumerate(keys):
            sample_text, _ = dataset.val_dict[sample_key]
            # Trim the prediction column to the sample's own length.
            sample_classes = predictions[:, idx][: len(sample_text)].cpu().numpy()
            color_print(sample_text, sample_classes)
예제 #2
0
def validate(model, dataset, batch_size=1, print_size=10):
    """Evaluate the model on one validation batch and report accuracies.

    Args:
        model: trained model, invoked as ``model(text)``.
        dataset: provides ``get_val_data(batch_size)`` and a ``val_dict``
            mapping key -> (raw text, ground-truth labels).
        batch_size: number of validation samples to request.
        print_size: maximum number of per-sample reports to print in full.
    """
    model.eval()
    with torch.no_grad():
        keys, text, truth = dataset.get_val_data(batch_size=batch_size)

        oupt = model(text)
        # Per-character class distribution; keep the top probability and
        # its class index for each character position.
        prob = torch.nn.functional.softmax(oupt, dim=2)
        prob, pred = torch.max(prob, dim=2)

        prob = prob.cpu().numpy()
        pred = pred.cpu().numpy()

        class_acc = 0.0
        char_acc = 0.0

        for i, key in enumerate(keys):
            real_text, real_label = dataset.val_dict[key]
            result = pred_to_dict(real_text, pred[:, i], prob[:, i])
            ground_truth = truth_to_dict(real_text, real_label)

            class_acc_unit = calc_accuracy(result, ground_truth)
            char_acc_unit = compare_truth(result, ground_truth)
            class_acc += class_acc_unit
            char_acc += char_acc_unit

            if i < print_size:
                print("====== Val. number %d ======" % i)
                for k, v in result.items():
                    print(f"{k:>8}: {v}")
                print()

                for k, v in ground_truth.items():
                    print(f"{k:>8}: {v}")

                print("-ACCURACY(Class): %.2f" % class_acc_unit)
                print("-ACCURACY(Char) : %.2f" % char_acc_unit)
                print()

                color_print(real_text, pred[:, i])
                print("============================")
                print()

        # Average over the number of samples actually returned rather than
        # the requested batch_size: the dataset may hand back fewer samples,
        # which would otherwise under-report accuracy (and an empty batch
        # would divide by zero).
        num_samples = max(len(keys), 1)
        print("=ACCURACY(Class): %.2f" % (class_acc * 100 / num_samples))
        print("=ACCURACY(Char) : %.2f" % (char_acc * 100 / num_samples))
예제 #3
0
def validate(model, dataset, batch_size=1):
    """Predict on one validation batch, printing each sample's field dict.

    Args:
        model: trained model, invoked as ``model(text)``.
        dataset: provides ``get_val_data(batch_size)`` and a ``val_dict``
            mapping key -> (raw text, labels).
        batch_size: number of validation samples to request.
    """
    model.eval()
    with torch.no_grad():
        keys, text, truth = dataset.get_val_data(batch_size=batch_size)

        logits = model(text)
        # Keep, per character position, the best probability and its class.
        distribution = torch.nn.functional.softmax(logits, dim=2)
        best_prob, best_class = torch.max(distribution, dim=2)

        best_prob = best_prob.cpu().numpy()
        best_class = best_class.cpu().numpy()

        for idx, sample_key in enumerate(keys):
            raw_text, _ = dataset.val_dict[sample_key]
            fields = pred_to_dict(raw_text, best_class[:, idx], best_prob[:, idx])

            for k, v in fields.items():
                print(f"{k:>8}: {v}")

            color_print(raw_text, best_class[:, idx])
예제 #4
0
def inference(text, model_path="model.pth", hidden_size=256):
    """Run the character tagger on ``text[0]`` and return the field dict.

    Args:
        text: single-element list whose first item is the raw input string.
            NOTE: mutated in place — ``text[0]`` is replaced by its
            preprocessed form.
        model_path: checkpoint file to load (defaults to the previously
            hard-coded ``"model.pth"``).
        hidden_size: hidden dimension the checkpoint was trained with.

    Returns:
        The dict produced by ``pred_to_dict`` (field name -> value).
    """
    text[0] = preprocess(text[0])
    device = torch.device("cpu")
    model = MyModel0(len(VOCAB), 16, hidden_size).to(device)
    model.load_state_dict(torch.load(model_path, map_location=device))

    # Encode characters as vocabulary indices, shape (seq_len, batch=1).
    # NOTE(review): VOCAB.find returns -1 for characters absent from VOCAB,
    # which silently indexes the last embedding row — confirm intended.
    text_tensor = torch.zeros(len(text[0]), 1, dtype=torch.long)
    text_tensor[:, 0] = torch.LongTensor(
        [VOCAB.find(c) for c in text[0].upper()])
    inp = text_tensor.to(device)

    # Bug fix: switch to eval mode and disable gradients, consistent with
    # the validate() functions — otherwise dropout/batchnorm (if any) stay
    # in training mode and autograd state is tracked needlessly.
    model.eval()
    with torch.no_grad():
        oupt = model(inp)
        prob = torch.nn.functional.softmax(oupt, dim=2)
        prob, pred = torch.max(prob, dim=2)

    color_print(text[0], pred)
    # Renamed local from `json` to avoid shadowing the stdlib module name.
    result = pred_to_dict(text[0], pred, prob)
    print("\n###########################\n")
    print(result)
    return result