def test_autoencoder(epoch_plus, text, index):
    # assumes: import os, torch, torchvision, torch.nn as nn; SegNet defined elsewhere
    use_gpu = torch.cuda.is_available()
    ngpu = torch.cuda.device_count()
    device = torch.device("cuda:0" if (use_gpu and ngpu > 0) else "cpu")

    model = SegNet(3)
    if ngpu > 1:
        model = nn.DataParallel(model)
    if use_gpu:
        model = model.to(device, non_blocking=True)
        text = text.to(device, non_blocking=True)

    if epoch_plus > 0:
        model.load_state_dict(
            torch.load('./autoencoder_models_2/autoencoder_{}.pth'.format(epoch_plus)))
    model.eval()

    predicted = model(text)
    predicted[predicted > 1.0] = 1.0  # clamp to valid image range

    save_path1 = './results/text'
    save_path2 = './results/masked'
    if not os.path.exists(save_path1):
        os.mkdir(save_path1)
    if not os.path.exists(save_path2):
        os.mkdir(save_path2)

    # binary masks derived from the prediction; overlay the mask on the input
    binary_predicted = predicted.clone()
    binary_mask = predicted.clone()
    binary_predicted[binary_predicted > 0.0] = 1.0
    binary_mask[binary_mask > 0.1] = 1.0
    masked = text + binary_mask
    masked[masked > 1.0] = 1.0

    trans = torchvision.transforms.ToPILImage()
    predicted = predicted.squeeze().cpu()
    masked = masked.squeeze().cpu()
    image = trans(predicted)
    image2 = trans(masked)
    image.save(os.path.join(save_path1, 'text_{}.png'.format(index)))
    image2.save(os.path.join(save_path2, 'masked_{}.png'.format(index)))

    del text
    del predicted
    del masked
    del binary_predicted
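# Hypothetical usage sketch (not from the original code): driving test_autoencoder
# over a validation loader. Assumes `val_loader` yields (text_image, target) batches
# and that a checkpoint for epoch `epoch_plus` exists under ./autoencoder_models_2/.
def run_autoencoder_tests(val_loader, epoch_plus):
    with torch.no_grad():
        for index, (text, _) in enumerate(val_loader):
            test_autoencoder(epoch_plus, text, index)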
def main():
    model_dir = './checkpoints/seg2/segnet_gen1/model_at_epoch_013.dat'
    save_dir = './test/0610/segnet_gen1/test'
    test_txt_path = './data/seg/valid.txt'

    # model = unet(in_channel=1, n_classes=1)
    model = SegNet(input_nbr=1, label_nbr=1)
    model = load_model(model, model_dir)
    model = model.cuda()
    model.eval()

    test_dataset = GuideWireDataset(test_txt_path)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=BATCH_SIZE,
                                              shuffle=False, num_workers=NUM_WORKERS)
    prefetcher = data_prefetcher(test_loader)
    input, target, distance = prefetcher.next()

    dice = []
    IoU = []
    precision = []
    recall = []
    i = -1
    while input is not None:
        i += 1
        with torch.no_grad():
            output = model(input)

        dice.append(dice_coeff(output, target).item())
        IoU.append(iou_coeff(output, target).item())
        precision.append(Precision(output, target).item())
        recall.append(Recall(output, target).item())

        # threshold the sigmoid output into a binary mask and save it
        output = torch.sigmoid(output).squeeze().data.cpu().numpy()
        output[output < 0.5] = 0
        output[output >= 0.5] = 1
        # output = torch.argmax(output, dim=1).squeeze().data.cpu().numpy()
        # output = output.squeeze().data.cpu().numpy()
        # output = np.argmax(output, axis=0)
        cv2.imwrite(os.path.join(save_dir, str(i) + '_output.jpg'), output * 255)
        print(str(i) + ' finish!')

        input, target, distance = prefetcher.next()

    print('dice: ', np.mean(dice), np.max(dice), np.min(dice), np.std(dice))
    print('iou: ', np.mean(IoU), np.max(IoU), np.min(IoU), np.std(IoU))
    print('precision: ', np.mean(precision), np.max(precision), np.min(precision), np.std(precision))
    print('recall: ', np.mean(recall), np.max(recall), np.min(recall), np.std(recall))
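# dice_coeff / iou_coeff / Precision / Recall are imported from elsewhere in the repo.
# Minimal sketch of a Dice coefficient, assuming `output` holds raw logits and
# `target` is a binary mask of the same shape; the repo's own helper may differ.
def dice_coeff_sketch(output, target, eps=1e-7):
    prob = torch.sigmoid(output)
    pred = (prob >= 0.5).float()
    inter = (pred * target).sum()
    return (2.0 * inter + eps) / (pred.sum() + target.sum() + eps)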
def test(args):
    cfg = load_cfg(args.cfg)
    weight_path = args.wts
    img_path = args.im_path

    segnet = SegNet().float().cuda()
    segnet.load_state_dict(torch.load(weight_path))
    segnet.eval()

    # HWC uint8 image -> CHW float tensor with a batch dimension
    im = cv2.imread(img_path).transpose(2, 0, 1)
    im = torch.tensor(im[np.newaxis, :], dtype=torch.float).cuda()

    out = segnet(im)
    out = out.detach().cpu().numpy().transpose(0, 2, 3, 1)
    out = np.argmax(out, axis=3).astype(np.uint8)[0]
    out = out[:, :, np.newaxis]
    out = out * 20  # scale class indices so they are visible as grayscale

    cv2.imshow('prediction', out)
    cv2.waitKey(0)
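# Hypothetical CLI wrapper for test(): the function only relies on args.cfg,
# args.wts and args.im_path, so a minimal argument parser could look like this.
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='Run SegNet on a single image')
    parser.add_argument('--cfg', required=True, help='path to config file')
    parser.add_argument('--wts', required=True, help='path to trained weights')
    parser.add_argument('--im_path', required=True, help='path to input image')
    return parser.parse_args()

if __name__ == '__main__':
    test(parse_args())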
def predict_image(dir):
    use_gpu = torch.cuda.is_available()
    ngpu = torch.cuda.device_count()
    device = torch.device("cuda:0" if (use_gpu and ngpu > 0) else "cpu")

    image_to_tensor = torchvision.transforms.ToTensor()
    tensor_to_image = torchvision.transforms.ToPILImage()
    save_path = Path(dir).parent

    image = Image.open(dir).convert('RGB')
    image = image_to_tensor(image)
    c, h, w = image.shape  # ToTensor returns (C, H, W)
    image = torch.reshape(image, (1, c, h, w))

    model = SegNet(3)
    if use_gpu:
        model = model.to(device, non_blocking=True)
        image = image.to(device, non_blocking=True)
    model.load_state_dict(torch.load('./models/model.pth', map_location=device))
    model.eval()

    predicted = model(image)
    predicted[predicted > 1.0] = 1.0  # clamp to valid image range

    # binary masks derived from the prediction; overlay the mask on the input
    binary_predicted = predicted.clone()
    binary_mask = predicted.clone()
    binary_predicted[binary_predicted > 0.0] = 1.0
    binary_mask[binary_mask > 0.1] = 1.0
    masked = image + binary_mask
    masked[masked > 1.0] = 1.0

    predicted = predicted.squeeze().cpu()
    masked = masked.squeeze().cpu()
    image = tensor_to_image(predicted)
    image2 = tensor_to_image(masked)
    image.save(os.path.join(save_path, 'tmp_text.png'))
    image2.save(os.path.join(save_path, 'tmp_masked.png'))
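# Example invocation (path is a placeholder): assumes ./models/model.pth exists;
# tmp_text.png and tmp_masked.png are written next to the input image.
if __name__ == '__main__':
    predict_image('./samples/sample.jpg')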
print("Selected model's pixelwise accuracy on test dataset : {:.5f}%".format( test_accuracy * 100)) # visualize some outputs model_selected = model show_all(model_selected, X_test[2, :], y_test[2, :], cmap='jet') show_all(model_selected, X_test[11, :], y_test[11, :], cmap='jet') show_all(model_selected, X_test[-2, :], y_test[-2, :], cmap='jet') show_all(model_selected, X_test[43, :], y_test[43, :], cmap='jet') show_all(model_selected, X_train[2, :], y_train[2, :], cmap='jet') show_all(model_selected, X_train[11, :], y_train[11, :], cmap='jet') show_all(model_selected, X_train[-2, :], y_train[-2, :], cmap='jet') show_all(model_selected, X_train[43, :], y_train[43, :], cmap='jet') # demo on a kitti image model.eval() kitti_img = cv2.imread( "/content/gdrive/MyDrive/Segmentation/KiTTi/um_000007.png") kitti_tensor = torch.from_numpy(np.moveaxis(kitti_img, -1, 0)) show_img(kitti_tensor) show_pred_mask(model, kitti_tensor.float()) show_all(model, kitti_tensor.float(), kitti_tensor.float()[0, :]) # write results to disk model_load = model load_dir = r'/content/gdrive/MyDrive/KiTTi_dataset/testing_crop' save_dir = r'/content/gdrive/MyDrive/KiTTi_dataset/testing_crop_segnet_new' write_to_dir(model_load, load_dir, save_dir) load_dir = r'/content/gdrive/MyDrive/KiTTi_dataset/training_crop' save_dir = r'/content/gdrive/MyDrive/KiTTi_dataset/training_crop_segnet_new'