def main():
    """Fetch captcha images from the QQ endpoint and label them with CaptchaNN.

    Downloads `downloadNum` images, runs each through the trained network, and
    renames the file to the predicted captcha text (``<pred>.jpg``).
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model_path = "captcha-breaker-v%d.pth" % CaptchaNN.version()
    data_path = "./fetched"
    getImageUrl = "http://captcha.qq.com/getimage"
    downloadNum = 500

    net = CaptchaNN().to(device)
    # BUG FIX: map_location lets a GPU-trained checkpoint load on a CPU-only
    # host instead of raising a deserialization error.
    net.load_state_dict(torch.load(model_path, map_location=device))
    net.eval()

    transform = CaptchaDataset.get_transform(224, 224)

    for _ in range(downloadNum):
        localPath = os.path.join(data_path, "!unclassified.jpg")
        urllib.request.urlretrieve(getImageUrl, localPath)

        # BUG FIX: close the image handle before renaming — an open handle
        # makes os.rename fail on Windows and leaks file descriptors.
        with Image.open(localPath) as pilImg:
            img = transform(pilImg)

        img = CaptchaDataset.to_var(img)
        X = img.to(device)
        with torch.no_grad():  # inference only; skip autograd bookkeeping
            pred = CaptchaDataset.decode_label(net.predict(X))

        # NOTE(review): repeated predictions overwrite earlier files with the
        # same name — TODO confirm that is intended.
        os.rename(localPath, os.path.join(data_path, pred + ".jpg"))
        print("Downloaded and recognized as ", pred)
def main():
    """Evaluate a trained CaptchaNN on the test set, printing per-sample
    predictions and a running whole-captcha accuracy.

    NOTE(review): this file appears to define ``main`` twice; if both live in
    the same module the later definition shadows the earlier one — confirm
    they belong to separate scripts.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model_path = "captcha-breaker-v%d.pth" % CaptchaNN.version()
    data_path = "./datav2/test"

    net = CaptchaNN().to(device)
    # map_location lets a GPU-trained checkpoint load on a CPU-only host.
    net.load_state_dict(torch.load(model_path, map_location=device))
    net.eval()

    #transform = dataset.CaptchaDataset.get_transform(224, 224)
    train_dataset = CaptchaDataset(data_path, 224, 224)
    trainIter = DataLoader(train_dataset, batch_size=1, num_workers=0,
                           shuffle=True, drop_last=False)

    rightNum = 0
    for i, (X, label) in enumerate(trainIter):
        X = X.to(device)
        label = label.to(device).long()
        label1 = label[:, 0]
        label2 = label[:, 1]
        label3 = label[:, 2]
        label4 = label[:, 3]

        with torch.no_grad():  # evaluation only; no gradients needed
            y1, y2, y3, y4 = net(X)
        _, y1_pred = torch.max(y1.data, dim=1)
        _, y2_pred = torch.max(y2.data, dim=1)
        _, y3_pred = torch.max(y3.data, dim=1)
        _, y4_pred = torch.max(y4.data, dim=1)

        print(
            CaptchaDataset.decode_label((label1, label2, label3, label4)),
            CaptchaDataset.decode_label((y1_pred.item(), y2_pred.item(),
                                         y3_pred.item(), y4_pred.item())))

        if label1 == y1_pred.item() and label2 == y2_pred.item() \
                and label3 == y3_pred.item() and label4 == y4_pred.item():
            rightNum += 1

        # BUG FIX: the original divided by `i`, which is 0 on the first
        # iteration and raises ZeroDivisionError; use the 1-based sample count.
        seen = i + 1
        print("RIGHT: %d/%d %f" % (rightNum, seen, rightNum / seen))
# Evaluate `model` over `data_loader`, print per-image losses sorted worst-first,
# then compute character accuracy (car) and whole-sequence accuracy (sar).
# Relies on `model`, `data_loader`, `dataset`, `device` defined earlier in the file.
result = []
total_pred, total_given = [], []
for i, (images, labels) in enumerate(data_loader):
    t0 = time.time()  # NOTE(review): unused in this span — kept in case later code reads it
    with torch.no_grad():
        inputs, targets = images.to(device), labels.to(device)
        logits = model(inputs)
        loss = F.cross_entropy(logits, targets)
        dist = F.log_softmax(logits, dim=1)
        pred = dist.max(1)[1]
        pred_label = "".join(dataset.decode_label(pred[0]))
        total_pred.extend(pred[0].cpu().numpy())
        total_given.extend(labels[0].cpu().numpy())
        result.append((loss.data.cpu(), dataset.image_names[i], pred_label))

# Worst (highest-loss) samples first; '*' marks a mismatch against the
# ground truth encoded in the filename (name minus its 4-char extension).
for loss, name, label in sorted(result, key=lambda x: -x[0]):
    err = ' ' if label == name[:-4] else '*'
    print("{}\t{}\t{}\t{:.8f}".format(name, label, err, loss))

pr, gv = np.array(total_pred), np.array(total_given)
car = np.mean(pr == gv)  # per-character accuracy
# BUG FIX: the original compared np.prod of each 5-character row, which can
# match for unequal sequences (permutations, any zero index, factorization
# collisions). Compare rows element-wise instead.
sar = np.mean(np.all(pr.reshape(-1, 5) == gv.reshape(-1, 5), axis=1))