Example #1
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import (accuracy_score, roc_auc_score,
                             average_precision_score, log_loss)

# CRNN and testloader are assumed to be defined elsewhere in the project
def eval_model():
    model = CRNN()
    model.load_state_dict(torch.load('./model_EEG.pt'))
    # specify the target classes
    classes = ('True', 'False')
    num_classes = len(classes)

    # track per-class accuracy
    class_correct = [0.0] * num_classes
    class_total = [0.0] * num_classes

    model.eval()
    with torch.no_grad():
        for data, target in testloader:
            # move data/target to the GPU here if one is available
            target = target.long()
            output, _ = model(data)
            # convert output scores to a predicted class
            _, pred = torch.max(output, 1)
            # compare predictions to the true labels
            correct = (pred == target).squeeze()
            for i, label in enumerate(target):
                class_correct[label] += correct[i].item()
                class_total[label] += 1
        for i in range(num_classes):
            print('Accuracy of %s : %2d%% out of %d cases' %
                  (classes[i], 100 * class_correct[i] / class_total[i], class_total[i]))

        # score one test batch against a constant-prediction baseline
        inputs, targets = next(iter(testloader))
        targets = targets.long()
        outputs, _ = model(inputs)
        probability, predicted = torch.max(outputs.data, 1)
        y_test = targets  # labels of the scored batch

        eval_metrics = pd.DataFrame(np.empty([2, 4]))
        eval_metrics.index = ['baseline', 'RNN']
        eval_metrics.columns = ['Accuracy', 'ROC AUC', 'PR AUC', 'Log Loss']
        # baseline: always predict class 0 with probability 0.5
        pred = np.repeat(0, len(y_test.cpu()))
        pred_proba = np.repeat(0.5, len(y_test.cpu()))
        eval_metrics.iloc[0, 0] = accuracy_score(y_test.cpu(), pred)
        eval_metrics.iloc[0, 1] = roc_auc_score(y_test.cpu(), pred_proba)
        eval_metrics.iloc[0, 2] = average_precision_score(y_test.cpu(), pred_proba)
        eval_metrics.iloc[0, 3] = log_loss(y_test.cpu(), pred_proba)
        # note: `probability` is the maximum raw score, not a class-1 probability
        eval_metrics.iloc[1, 0] = accuracy_score(y_test.cpu(), predicted.cpu())
        eval_metrics.iloc[1, 1] = roc_auc_score(y_test.cpu(), probability.cpu())
        eval_metrics.iloc[1, 2] = average_precision_score(y_test.cpu(), probability.cpu())
        eval_metrics.iloc[1, 3] = 0  # log_loss(y_test.cpu(), pred_proba[:, 1])

        print(eval_metrics)
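
The RNN row above feeds the batch's maximum raw score into roc_auc_score and average_precision_score, which is not a calibrated class-1 probability. A minimal sketch of the more conventional input for those metrics, assuming the model emits two logits per sample; it reuses the outputs and y_test names from the example above:

import torch.nn.functional as F

# class-1 probabilities from a two-logit output of shape [N, 2]
proba = F.softmax(outputs, dim=1)[:, 1]
# valid inputs for probability-based metrics, e.g.:
# roc_auc_score(y_test.cpu(), proba.cpu())
# log_loss(y_test.cpu(), proba.cpu())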
Example #2
import os

import numpy as np
import torch

# CRNN, parse_args and pad_mel_spectogram are assumed to be defined elsewhere
def main():
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    args = parse_args()

    # load the noisy mel spectrogram and pad it to the length the model expects
    noisy_mel = np.load(args.path_to_file)
    h, w = noisy_mel.shape
    noisy_mel = pad_mel_spectogram(noisy_mel)
    noisy_mel = torch.tensor(noisy_mel, dtype=torch.float32)
    noisy_mel = noisy_mel.unsqueeze(0).to(device)  # batch dimension, then to the model's device

    model = CRNN().to(device)
    model.load_state_dict(torch.load(args.path_to_model, map_location=device))
    model.eval()

    with torch.no_grad():
        clean_mel = model(noisy_mel)
    clean_mel = clean_mel.squeeze(0)
    clean_mel = clean_mel.cpu().numpy()
    clean_mel = clean_mel[:h]  # trim the padding back off

    save_dir = os.path.dirname(args.path_to_save)
    if save_dir and not os.path.exists(save_dir):
        os.makedirs(save_dir)
    np.save(args.path_to_save, clean_mel)
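
pad_mel_spectogram is not shown here. A minimal sketch of what such a helper might look like, assuming the network needs the time axis (axis 0) padded up to a multiple of a fixed block size; the block size of 8 is an illustrative assumption, not something the original states:

import numpy as np

def pad_mel_spectogram(mel, multiple=8):
    # zero-pad axis 0 up to the next multiple of `multiple` (assumed requirement)
    h, w = mel.shape
    padded_h = -(-h // multiple) * multiple  # ceiling division
    return np.pad(mel, ((0, padded_h - h), (0, 0)), mode='constant')

The clean_mel[:h] trim in the example suggests the padding is indeed applied along axis 0.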
Example #3
import cv2
import torch
from torchvision import transforms

# CRNN, LabelConverter, Rescale, Normalize, IMAGE_HEIGHT, alphabet,
# model_path and img_path are assumed to be defined elsewhere
model = CRNN(IMAGE_HEIGHT, 1, len(alphabet) + 1, 256)
if torch.cuda.is_available():
    model = model.cuda()
print('loading pretrained model from %s' % model_path)
model.load_state_dict(torch.load(model_path))

converter = LabelConverter(alphabet)

image_transform = transforms.Compose(
    [Rescale(IMAGE_HEIGHT),
     transforms.ToTensor(),
     Normalize()])
image = cv2.imread(img_path, 0)  # read as grayscale
image = image_transform(image)
if torch.cuda.is_available():
    image = image.cuda()
image = image.unsqueeze(0)  # add a batch dimension

model.eval()
with torch.no_grad():  # torch.autograd.Variable is deprecated; no_grad() replaces it
    preds = model(image)

# greedy decoding: take the most likely class at each time step
_, preds = preds.max(2)
preds = preds.transpose(1, 0).contiguous().view(-1)

preds_size = torch.IntTensor([preds.size(0)])
raw_pred = converter.decode(preds.data, preds_size, raw=True)
sim_pred = converter.decode(preds.data, preds_size, raw=False)
print('%-20s => %-20s' % (raw_pred, sim_pred))
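
converter.decode performs the CTC post-processing: with raw=True it maps every time step straight to a character, and with raw=False it collapses repeats and removes the blank. A minimal sketch of that collapse, assuming index 0 is the CTC blank and the indices arrive as plain Python ints (e.g. preds.tolist()); this is illustrative, not the repo's LabelConverter:

def ctc_greedy_collapse(indices, alphabet, blank=0):
    # collapse repeated indices, then drop blanks (standard CTC greedy decoding)
    chars = []
    prev = None
    for idx in indices:
        if idx != prev and idx != blank:
            chars.append(alphabet[idx - 1])  # classes 1..N map to alphabet[0..N-1]
        prev = idx
    return ''.join(chars)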
Example #4
import argparse
import logging
import os

import torch
from torch.utils import data

# CRNN, SceneLoader, strLabelConverter and setup_logger are assumed to be
# defined elsewhere in the project
def main():
    parser = argparse.ArgumentParser(description='CRNN')
    parser.add_argument('--name', default='32x100', type=str)
    parser.add_argument('--exp', default='syn90k', type=str)

    ## data settings
    parser.add_argument('--root', default='/data/data/text_recognition/', type=str)
    parser.add_argument('--test_dataset', default='ic03', type=str)
    parser.add_argument('--load_width', default=100, type=int)
    parser.add_argument('--load_height', default=32, type=int)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--num_workers', default=8, type=int)
    ## model settings
    parser.add_argument('--snapshot', default='./weights/32x100/syn90k/3_51474.pth', type=str)
    parser.add_argument('--alphabeta', default='0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', type=str)
    # caution: argparse's type=bool treats any non-empty string as True
    parser.add_argument('--ignore_case', default=True, type=bool)
    ## output settings
    parser.add_argument('--out_dir', default='./outputs', type=str)

    args = parser.parse_args()

    # outputs/<name>/<snapshot stem>/
    snapshot_stem = os.path.splitext(os.path.basename(args.snapshot.strip()))[0]
    args.out_dir = os.path.join(args.out_dir, args.name, snapshot_stem)
    os.makedirs(args.out_dir, exist_ok=True)

    if args.ignore_case:
        args.alphabeta = args.alphabeta[:36]  # digits and lowercase letters only
    args.nClasses = len(args.alphabeta) + 1  # +1 for the CTC blank

    log_path = os.path.join(args.out_dir, args.test_dataset + '.txt')
    setup_logger(log_path)

    logging.info('model will be evaluated on %s' % args.test_dataset)
    testset = SceneLoader(args, args.test_dataset, False)
    logging.info('%d test samples' % len(testset))
    test_loader = data.DataLoader(testset, args.batch_size, num_workers=args.num_workers,
                                  shuffle=False, pin_memory=True)

    ## model: the snapshot was saved from a DataParallel wrapper, so load it
    ## through the wrapper, then unwrap for evaluation
    net = torch.nn.DataParallel(CRNN(args)).cuda()
    net.load_state_dict(torch.load(args.snapshot))
    net = net.module
    net.eval()

    n_correct = 0
    n_samples = 0
    converter = strLabelConverter(args.alphabeta, args.ignore_case)

    for index, sample in enumerate(test_loader):
        imgs, gts, lexicon50, lexicon1k, lexiconfull, img_paths = sample
        imgs = imgs.cuda()
        with torch.no_grad():
            preds = net(imgs)
        preds_size = torch.IntTensor([preds.size(0)] * preds.size(1))
        ## greedy CTC decoding: best class per time step, collapsed by the converter
        _, preds = preds.max(2)
        preds = preds.transpose(1, 0).contiguous().view(-1)
        text_preds = converter.decode(preds.data, preds_size, raw=False)
        for pred, target in zip(text_preds, gts):
            n_samples += 1
            if pred.lower() == target.lower():
                n_correct += 1
                logging.info('pred: %s  gt: %s' % (pred, target))
            else:
                logging.info('pred: %s  gt: %s  -----------------------------!!!!!!' % (pred, target))

    assert n_samples == len(testset)
    acc = n_correct * 1.0 / len(testset)
    logging.info('accuracy=%f' % acc)
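
setup_logger is not defined in this example. A minimal sketch of a compatible implementation, assuming it should send logging.info output to both the given log file and stdout; the format string is an assumption:

import logging
import sys

def setup_logger(log_path):
    # route INFO-level records to the log file and to stdout
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(message)s',
        handlers=[logging.FileHandler(log_path),
                  logging.StreamHandler(sys.stdout)])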