def evaluate():
    """Run the captcha model on the most recently uploaded image.

    Collects every uploaded file matching an allowed extension, picks the
    newest one (by creation time), and returns the decoded prediction for it.

    :return: decoded prediction output for the newest image, or ``None``
             when no uploaded image is found (the loop body never runs).
    """
    # Gather candidate images for every allowed extension.
    candidates = []
    for ext in config.ALLOWED_EXTENSIONS:
        pattern = os.path.join(config.IMAGES_UPLOADED_PATH,
                               "*.{}".format(ext))
        candidates.extend(glob(pattern))

    # Newest first, then keep only the single most recent upload.
    candidates.sort(key=os.path.getctime, reverse=True)
    newest = candidates[:1]

    test_dataset = dataset.ClassificationDataset(
        image_paths=newest,
        resize=(config.IMAGE_HEIGHT, config.IMAGE_WIDTH))
    test_loader = DataLoader(test_dataset, batch_size=1, num_workers=0)

    model = CaptchaModel(num_chars=len(lbl_enc.classes_))
    model.to(config.DEVICE)

    # Restore trained weights onto the configured device.
    state = load('./checkpoints/captcha_v1/captcha_v1.pth',
                 map_location=config.DEVICE)
    model.load_state_dict(state)
    model.eval()

    # At most one batch exists (batch_size=1, one image); return immediately.
    for batch in test_loader:
        batch["images"] = batch["images"].to(config.DEVICE)
        prediction, _ = model(batch["images"])
        return decode_predictions(prediction, lbl_enc)
# Example #2
def predict(captcha,
            model_dir='./model/model-latest.pkl',
            use_gpu=True,
            mode='captcha'):
    """Predict the 4-character text of a single captcha image.

  :param captcha: path or file object of the captcha image
  :param model_dir: path to the serialized model checkpoint
  :param use_gpu: run on GPU when one is available
  :param mode: 'captcha' or 'kaptcha' — selects which model class to load
  :return: the predicted 4-character string, or None for an unknown mode
  """
    # Collapse the repeated "use_gpu and gpu_available" checks into one flag.
    on_gpu = use_gpu and torch.cuda.is_available()

    if mode == 'captcha':
        from model import CaptchaModel
    elif mode == 'kaptcha':
        from kaptcha_model import CaptchaModel
    else:
        return
    model = CaptchaModel()

    if on_gpu:
        model_state = torch.load(model_dir)
    else:
        # Remap any CUDA-saved tensors onto the CPU.
        model_state = torch.load(model_dir,
                                 map_location=lambda storage, loc: storage)

    model.load_state_dict(model_state['network'])
    model = model.cuda() if on_gpu else model.cpu()

    # FIX: Compose expects a *list* of transforms; the original passed a bare
    # ToTensor() and then called transformer.transforms(...), which only
    # worked by accident. Call the Compose object directly instead.
    transformer = Compose([ToTensor()])

    img_pil = Image.open(captcha)
    img_tensor = transformer(img_pil)

    model.eval()
    x = torch.stack([img_tensor])  # add the batch dimension
    if on_gpu:
        x = x.cuda()
    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        pred1, pred2, pred3, pred4 = model(x)

    # Each head predicts one character; labels are 1-indexed in the dict.
    pred_seq = [
        torch.argmax(p).item() + 1 for p in (pred1, pred2, pred3, pred4)
    ]

    _, id2label = get_dict()

    return ''.join(id2label[i] for i in pred_seq)
# Example #3
def get_predictions(image_path, model_path):
    """Decode the captcha text in a single image with a trained model.

    :param image_path: path to the captcha image to decode
    :param model_path: path to the saved model state dict
    :return: the decoded (blank-stripped) predictions
    """
    # Fixed character vocabulary the model was trained on.
    classes = [
        '2', '3', '4', '5', '6', '7', '8', 'b', 'c', 'd', 'e', 'f', 'g', 'm',
        'n', 'p', 'w', 'x', 'y'
    ]
    le = preprocessing.LabelEncoder()
    le.fit(sorted(classes))
    n_classes = len(classes)

    model = CaptchaModel(num_chars=n_classes)
    # FIX: map_location makes a GPU-saved checkpoint loadable on CPU-only
    # machines; the model itself is never moved off the CPU here.
    model.load_state_dict(torch.load(model_path, map_location='cpu'))
    model.eval()

    data = preproc_image(image_path)

    with torch.no_grad():
        preds, _ = model(**data)

    # Now decode the preds
    preds = decode_predictions(preds, le)
    preds = remove_blanks(preds)
    print(preds)
    # FIX: the original computed and printed the predictions but discarded
    # them; return the value so callers can use it.
    return preds
# Example #4
def eval(model_dir,
         data_dir,
         batch_size=64,
         log_dir='./logs',
         use_gpu=True,
         mode='captcha'):
    """Evaluate a trained captcha model on the test split and log accuracy.

  :param model_dir: path to the saved checkpoint (dict with a 'network' key)
  :param data_dir: dataset root handed to get_data_split
  :param batch_size: evaluation batch size
  :param log_dir: directory where eval.json (accuracy history) is written
  :param use_gpu: run on GPU when one is available
  :param mode: 'captcha' or 'kaptcha' — selects which model class to load
  :return: None; per-batch [acc, multi_acc] pairs are dumped to eval.json
  """
    x_test, y_test = get_data_split(data_dir, modes=['test'])
    if mode == 'captcha':
        from model import CaptchaModel
    elif mode == 'kaptcha':
        from kaptcha_model import CaptchaModel
    else:
        # FIX: fail fast with a clear message; the original fell through and
        # crashed later with a NameError on CaptchaModel.
        raise ValueError(
            "mode must be 'captcha' or 'kaptcha', got {!r}".format(mode))
    model = CaptchaModel()

    gpu_available = torch.cuda.is_available()
    on_gpu = use_gpu and gpu_available

    if on_gpu:
        model = model.cuda()
        model_state = torch.load(model_dir)
    else:
        # Remap any CUDA-saved tensors onto the CPU.
        model_state = torch.load(model_dir,
                                 map_location=lambda storage, loc: storage)

    model.load_state_dict(model_state['network'])

    test_ds = CaptchaLoader((x_test, y_test), shuffle=True)

    test_loader = DataLoader(test_ds, batch_size=batch_size, shuffle=True)

    model.eval()

    acc_history = []
    # Evaluation only — disable autograd for speed and memory.
    with torch.no_grad(), \
            tqdm(total=int(np.ceil(len(test_loader.dataset) / batch_size)),
                 desc='Eval') as eval_bar:
        for x, y in test_loader:
            # FIX: torch.tensor(tensor, ...) copies and is deprecated for
            # existing tensors; detach() is the equivalent no-grad view.
            x = x.detach()
            y = y.detach()

            if on_gpu:
                x = x.cuda()
                y = y.cuda()

            pred1, pred2, pred3, pred4 = model(x)
            # Per-character accuracy, averaged over the four heads.
            acc_mean = np.mean([
                acc(pred1, y[:, 0]),
                acc(pred2, y[:, 1]),
                acc(pred3, y[:, 2]),
                acc(pred4, y[:, 3])
            ])

            # Whole-captcha accuracy: all four characters must match.
            pred = torch.stack((pred1, pred2, pred3, pred4), dim=-1)
            multi_acc_mean = multi_acc(torch.argmax(pred, dim=1), y)

            acc_history.append([acc_mean.item(), multi_acc_mean])

            eval_bar.update()
            eval_bar.set_postfix(acc=acc_mean, multi_acc=multi_acc_mean)

    # FIX: makedirs handles nested log_dir paths; mkdir failed on them.
    os.makedirs(log_dir, exist_ok=True)
    with open(os.path.join(log_dir, 'eval.json'), mode='w') as out_fp:
        json.dump(acc_history, out_fp)