def enhance(
    enhancer,
    upscaler,
    image: Image,
    device,
    factor: int = 5,
    half_precision: bool = False,
) -> Image:
    x = to_tensor(image).to(device).unsqueeze(0)
    h, w = x.shape[-2:]  # original size
    x_size = (h, w)
    hp, wp = compute_padding(h, factor), compute_padding(w, factor)
    padding = (0, wp, 0, hp)  # padding: left, right, top, bottom
    x_prime = F.pad(x, padding, mode="replicate")
    h_prime, w_prime = x_prime.shape[-2:]
    x_prime_prime_size = (h_prime // factor, w_prime // factor)
    x_prime_n = normalize(x_prime)
    x_prime_prime_n = F.interpolate(
        x_prime_n, size=x_prime_prime_size, mode=MODE, align_corners=True
    )
    print(f"Running inference at {x_prime_prime_size[1]}x{x_prime_prime_size[0]} pixels")
    # Enhance the downscaled input; the output is normalized.
    with torch.no_grad():
        if half_precision:
            y_hat_prime_prime_n = enhancer(x_prime_prime_n.half())
            guide = x_prime_n.half()
            y_hat_prime_n = F.interpolate(
                y_hat_prime_prime_n, guide.shape[-2:], mode="nearest"
            )
            y_hat_prime_n = upscaler(torch.cat([guide, y_hat_prime_n], dim=1)).float()
        else:
            y_hat_prime_prime_n = enhancer(x_prime_prime_n)
            guide = x_prime_n
            y_hat_prime_n = F.interpolate(
                y_hat_prime_prime_n, guide.shape[-2:], mode="nearest"
            )
            y_hat_prime_n = upscaler(torch.cat([guide, y_hat_prime_n], dim=1))
    y_hat_prime = denormalize(y_hat_prime_n)
    y_hat = remove_padding(x_size, y_hat_prime)
    result = clamp(y_hat)
    return output_transforms(result)
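# The two padding helpers used by enhance() are not defined in this file.
# A minimal sketch of what they might look like, assuming the intent is to
# pad the bottom/right edges up to the next multiple of `factor` so the
# downscale by `factor` is exact (names and bodies are assumptions, not the
# repository's actual implementation):
def compute_padding(dim: int, factor: int) -> int:
    # Amount of padding needed so that `dim` becomes a multiple of `factor`.
    return (factor - dim % factor) % factor

def remove_padding(size, y):
    # Crop the replicate padding added by F.pad back to the original size.
    h, w = size
    return y[..., :h, :w]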
def enhance_straight(
    network,
    image: Image,
    device,
    half_precision: bool = False,
) -> Image:
    # Run the network directly at the full input resolution.
    print("straight")
    x_n = normalize(to_tensor(image).to(device)).unsqueeze(0)
    h, w = x_n.shape[-2:]
    print(f"Running inference at {w}x{h} pixels")
    if half_precision:
        _y_hat_n = inference(network, x_n.half()).float()
    else:
        _y_hat_n = inference(network, x_n)
    y_hat = denormalize(_y_hat_n).cpu()
    result_image = output_transforms(y_hat)
    return result_image
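# `inference` is assumed to be a thin no-grad forward pass, mirroring the
# torch.no_grad() block inside enhance() above; a hypothetical sketch:
import torch  # assumed import for the sketch below

def inference(network, x):
    # Forward pass without building the autograd graph.
    with torch.no_grad():
        return network(x)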
def enhance_rmu(
    network, image: Image, size: int, device, half_precision: bool = False
) -> Image:
    x_nd, x_n = input_transforms(image, size, device)
    h, w = x_nd.shape[-2:]
    print(f"Running inference at {w}x{h} pixels")
    output_size = x_n.shape[-2:]
    upscale = upscaler(output_size)
    if half_precision:
        _y_hat_nd = inference(network, x_nd.half()).float()
    else:
        _y_hat_nd = inference(network, x_nd)
    # Residual mask upsampling: take the enhancement residual at the reduced
    # resolution, upsample it, and add it back onto the full-size input.
    bmask_dn = (_y_hat_nd - x_nd).detach()
    bmask_n = upscale(bmask_dn)
    y_hat_n = bmask_n + x_n
    y_hat = denormalize(y_hat_n).cpu()
    result_image = output_transforms(y_hat)
    return result_image
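# The module-level `upscaler` factory called by enhance_rmu() (distinct from
# the `upscaler` network argument of enhance()) is assumed to return a module
# that resizes the low-resolution residual to the full input size; one
# plausible, purely illustrative implementation:
import torch.nn as nn  # assumed import for the sketch below

def upscaler(output_size):
    # Bilinear resize of the residual mask back to `output_size`.
    return nn.Upsample(size=output_size, mode="bilinear", align_corners=True)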
def denormalize(input_path, output_path, **kwargs):
    helpers.denormalize(input_path, output_path)
model.load_weights(model_path)
if args.timer:
    # time.clock() was removed in Python 3.8; use perf_counter() instead.
    time_elapsed0 = time.perf_counter() - time_start
    print("****** Loading the model takes", "%.2f" % time_elapsed0, "s *******")

# Load the image.
image = cv2.imread(args.image_path)             # OpenCV reads images as BGR
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # convert to RGB
image = cv2.resize(image, (320, 480))           # resize to (width, height)
image = np.expand_dims(image, axis=0)           # add a batch dimension

# Normalize the image with the backbone-specific preprocessing.
preprocess_input = sm.get_preprocessing(args.backbone)
image = preprocess_input(image)

# Infer the segmentation.
print("Inferring the avalanche...")
time_start = time.perf_counter()
prediction = model.predict(image)
if args.timer:
    time_elapsed = time.perf_counter() - time_start
    print("****** Inferring the avalanche takes", "%.2f" % time_elapsed, "s *******")
    print("****** Total time", "%.2f" % (time_elapsed0 + time_elapsed), "s ******")

# Visualize the results.
pr_mask = prediction.round()
visualize(
    image=denormalize(image.squeeze()),
    Prediction_mask=pr_mask[..., 0].squeeze(),
)
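# `visualize` (and the `denormalize` it is fed) are assumed to follow the
# helper pattern from the segmentation_models example notebooks; a
# hypothetical sketch of the plotting side (name and layout are assumptions):
import matplotlib.pyplot as plt  # assumed import for the sketch below

def visualize(**images):
    # Plot each named image/mask side by side in a single row.
    n = len(images)
    plt.figure(figsize=(16, 5))
    for i, (name, img) in enumerate(images.items()):
        plt.subplot(1, n, i + 1)
        plt.title(name.replace("_", " ").title())
        plt.axis("off")
        plt.imshow(img)
    plt.show()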