def gpu_prediction_sample(args):
    """CUDA-only sample prediction: predict and visualize a mask per input image.

    Loads a 3-channel / 1-class UNet checkpoint from ``args.model`` onto the
    GPU, runs ``predict_img`` on every file in ``args.input``, and opens an
    interactive plot for each result.
    """
    in_files = args.input

    net = UNet(n_channels=3, n_classes=1)
    print("Loading model {}".format(args.model))
    print("Using CUDA version of the net, prepare your GPU !")
    net.cuda()
    net.load_state_dict(torch.load(args.model))
    print("Model loaded !")

    for fn in in_files:
        print("\nPredicting image {} ...".format(fn))
        img = Image.open(fn)
        # Portrait-oriented images only trigger a warning; prediction proceeds.
        if img.size[0] < img.size[1]:
            print("Error: image height larger than the width")
        mask = predict_img(net=net,
                           full_img=img,
                           scale_factor=args.scale,
                           out_threshold=args.mask_threshold,
                           use_dense_crf=not args.no_crf,
                           use_gpu=not args.cpu)
        print("Visualizing results for image {}, close to continue ...".format(fn))
        plot_img_and_mask(img, mask)
def main(raw_args):
    """Debug entry point: run inference on hard-coded paths and return the mask.

    Parses ``raw_args`` for runtime flags (scale, thresholds, cpu/viz, ...)
    but then overrides ``args.model`` and the input list with fixed paths.
    Returns the mask predicted for the last (only) input image.

    Fixes vs. previous version: removed the unused ``out_files`` local and
    the commented-out dead save code.
    """
    args = get_args(raw_args)
    # Hard-coded debug checkpoint and input; these override the parsed args.
    args.model = '/home/zhaojin/data/TacomaBridge/segdata/train/checkpoint/logloss_softmax/CP12.pth'
    in_files = ['/home/zhaojin/data/TacomaBridge/segdata/train/img/00034.png']

    net = UNet(n_channels=1, n_classes=4)
    print("Loading model {}".format(args.model))
    if not args.cpu:
        print("Using CUDA version of the net, prepare your GPU !")
        net.cuda()
        net.load_state_dict(torch.load(args.model))
    else:
        net.cpu()
        # map_location='cpu' lets a GPU-saved checkpoint load on a CPU-only box.
        net.load_state_dict(torch.load(args.model, map_location='cpu'))
        print("Using CPU version of the net, this may be very slow")
    print("Model loaded !")

    for i, fn in enumerate(in_files):
        print("\nPredicting image {} ...".format(fn))
        img = Image.open(fn)
        # Portrait-oriented images only trigger a warning; prediction proceeds.
        if img.size[0] < img.size[1]:
            print("Error: image height larger than the width")
        mask = predict_img(net=net,
                           full_img=img,
                           scale_factor=args.scale,
                           out_threshold=args.mask_threshold,
                           use_dense_crf=not args.no_crf,
                           use_gpu=not args.cpu)
        if args.viz:
            print("Visualizing results for image {}, close to continue ...".format(fn))
            plot_img_and_mask(img, mask)
    return mask
def prediction(args):
    """Predict a mask for every file in ``args.input``; optionally view/save.

    Loads a 3-channel / 1-class UNet from ``args.model`` (GPU unless
    ``args.cpu``), runs ``predict_img`` on each image, shows the result when
    ``args.viz`` is set, and writes each mask unless ``args.no_save`` is set.
    """
    in_files = args.input
    out_files = get_output_filenames(args)

    net = UNet(n_channels=3, n_classes=1)
    print("Loading model {}".format(args.model))
    if args.cpu:
        net.cpu()
        # map_location='cpu' lets a GPU-saved checkpoint load on a CPU-only box.
        net.load_state_dict(torch.load(args.model, map_location='cpu'))
        print("Using CPU version of the net, this may be very slow")
    else:
        print("Using CUDA version of the net, prepare your GPU !")
        net.cuda()
        net.load_state_dict(torch.load(args.model))
    print("Model loaded !")

    for idx, fname in enumerate(in_files):
        print("\nPredicting image {} ...".format(fname))
        img = Image.open(fname)
        # Portrait-oriented images only trigger a warning; prediction proceeds.
        if img.size[0] < img.size[1]:
            print("Error: image height larger than the width")
        mask = predict_img(net=net,
                           full_img=img,
                           scale_factor=args.scale,
                           out_threshold=args.mask_threshold,
                           use_dense_crf=not args.no_crf,
                           use_gpu=not args.cpu)
        if args.viz:
            print("Visualizing results for image {}, close to continue ...".format(fname))
            plot_img_and_mask(img, mask)
        if not args.no_save:
            result = mask_to_image(mask)
            result.save(out_files[idx])
            print("Mask saved to {}".format(out_files[idx]))
def main(raw_args=None):
    """Predict masks for ``args.input`` with a 1-channel / 4-class UNet.

    example:
        python predict.py --model '/home/zhaojin/data/TacomaBridge/segdata/train/checkpoint/weight_logloss_softmax/CP30.pth' --input '/home/zhaojin/data/TacomaBridge/segdata/train/img/00034.png' --viz
    """
    args = get_args(raw_args)
    print('args', args)
    in_files = args.input

    net = UNet(n_channels=1, n_classes=4)
    print("Loading model {}".format(args.model))
    if args.cpu:
        net.cpu()
        # map_location='cpu' lets a GPU-saved checkpoint load on a CPU-only box.
        net.load_state_dict(torch.load(args.model, map_location='cpu'))
        print("Using CPU version of the net, this may be very slow")
    else:
        print("Using CUDA version of the net, prepare your GPU !")
        net.cuda()
        net.load_state_dict(torch.load(args.model))
    print("Model loaded !")

    for fname in in_files:
        print("\nPredicting image {} ...".format(fname))
        img = Image.open(fname)
        # Portrait-oriented images only trigger a warning; prediction proceeds.
        if img.size[0] < img.size[1]:
            print("Error: image height larger than the width")
        mask = predict_img(net=net,
                           full_img=img,
                           scale_factor=args.scale,
                           out_threshold=args.mask_threshold,
                           use_dense_crf=not args.no_crf,
                           use_gpu=not args.cpu)
        if args.viz:
            print("Visualizing results for image {}, close to continue ...".format(fname))
            # CHW -> HWC so the multi-class mask plots as an image.
            mask = np.transpose(mask, axes=[1, 2, 0])
            plot_img_and_mask(img, mask)
# Fragment: tail of a prediction routine whose `def` is outside this view;
# `net`, `in_files`, and `args` are defined earlier.
print("Model loaded !")
for i, fn in enumerate(in_files):
    print("\nPredicting image {} ...".format(fn))
    # Loads all frames for one patient; exact shapes/semantics are
    # project-defined — TODO confirm against load_patient_images.
    images, patient_number, frame_indices, rotated, true_masks = load_patient_images(fn)
    # if img.size[0] < img.size[1]:
    #     print("Error: image height larger than the width")
    predictions = []
    # NOTE(review): this inner loop reuses the index name `i`, shadowing the
    # outer file index; `true_masks[i]` below therefore uses the *inner*
    # (last-frame) index — confirm that is intended.
    for i, image in enumerate(images):
        image = torch.FloatTensor(image)
        image = image.cuda()  # CUDA-only path; no CPU fallback here
        mask_pred = net(image[None, :, :, :])  # feed one at a time
        predictions.append((image[:, :, 0], mask_pred[0, :, :, 1]))
    # mask = predict_img(net=net,
    #                    full_img=img,
    #                    scale_factor=args.scale,
    #                    out_threshold=args.mask_threshold,
    #                    use_dense_crf=not args.no_crf,
    #                    use_gpu=not args.cpu)
    if args.viz:
        print("Visualizing results for image {}, close to continue ...".format(fn))
        # Only the last processed frame is visualized (loop variables persist).
        image = image[0]
        mask_pred = mask_pred[0][0]
        plot_img_and_mask(image, mask_pred, true_masks[i])
# Fragment: tail of a prediction routine whose `def` is outside this view;
# `net`, `args`, `in_files`, and `out_files` are defined earlier.
# Picks CUDA when available, otherwise CPU; one device object drives both
# model placement and checkpoint loading.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info(f'Using device {device}')
net.to(device=device)
net.load_state_dict(torch.load(args.model, map_location=device))
logging.info("Model loaded !")
for i, fn in enumerate(in_files):
    logging.info("\nPredicting image {} ...".format(fn))
    img = Image.open(fn)
    mask = predict_img(net=net,
                       full_img=img,
                       scale_factor=args.scale,
                       out_threshold=args.mask_threshold,
                       use_dense_crf=False,  # CRF post-processing hard-disabled in this variant
                       device=device)
    if not args.no_save:
        # NOTE(review): out_fn is assigned but unused; result.save reads
        # out_files[i] directly.
        out_fn = out_files[i]
        result = mask_to_image(mask)
        result.save(out_files[i])
        logging.info("Mask saved to {}".format(out_files[i]))
    if args.viz:
        logging.info("Visualizing results for image {}, close to continue ...".format(fn))
        plot_img_and_mask(img, mask)
# Fragment: the first line below is the tail of a preceding
# parser.add_argument(...) call whose start is outside this view.
                        default=True)
# NOTE(review): action='store_true' combined with default=True means
# args.no_save is ALWAYS True — the flag can never be turned off from the
# command line. Confirm whether default should be False.
    parser.add_argument('--no-save', '-n', action='store_true',
                        help="Do not save the output masks",
                        default=True)
    return parser.parse_args()


if __name__ == "__main__":
    args = get_args()
    in_files = args.input
    out_files = args.output
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Only the first input image is processed in this script variant.
    input_img = Image.open(in_files[0])
    # print(input_img)
    net = ResUNetA(3, 1)
    net.to(device=device)
    # print(args.model)
    load = torch.load(args.model, map_location=device)
    # print(load.keys())
    # Checkpoint is a dict; the network weights live under the 'netG' key.
    net.load_state_dict(load['netG'])
    mask = predict_mask(net, input_img, device)
    plot_img_and_mask(input_img, mask)
# Fragment: tail of a prediction routine whose `def` is outside this view;
# `net`, `args`, `in_files`, `out_files_v`, and `out_files_h` are defined
# earlier.
print("Model loaded !")
for i, fn in enumerate(in_files):
    print("\nPredicting image {} ...".format(fn))
    #img = Image.open(fn)
    # Reads with OpenCV (BGR channel order) then wraps as a PIL image —
    # NOTE(review): channels are NOT converted to RGB; confirm the model
    # expects BGR input.
    img = cv2.imread(fn)
    img = Image.fromarray(img)
    # Two masks per image — presumably vertical/horizontal structures; verify.
    mask_v, mask_h = predict_img(net=net,
                                 full_img=img,
                                 use_gpu=not args.cpu)
    if args.viz:
        print("Visualizing results for image {}, close to continue ...".format(fn))
        plot_img_and_mask(img, mask_v)  # only the vertical mask is shown
    if not args.no_save:
        mask_v = mask_v.numpy()
        # mask_v=np.where(mask_v==1,mask_v, 0)
        # print(mask_v)
        result_v = mask_to_image(mask_v)
        result_v.save(out_files_v[i])
        mask_h = mask_h.numpy()
        # mask_h=np.where(mask_h==1,mask_h,0)
        result_h = mask_to_image(mask_h)
        result_h.save(out_files_h[i])
        print("Mask saved to {}".format(out_files_v[i]))
        print("Mask saved to {}".format(out_files_h[i]))