def process(self):
    """Perform the model warping and output an image grid."""
    if self.getPropertyByIdentifier("off").value:
        print("Image warping is currently turned off")
        return 1

    start_time = time.time()

    # Passthrough mode: resize the first input image and send it straight out.
    if self.getPropertyByIdentifier("display_input").value:
        im_data = []
        for name in INPORT_LIST:
            im_data.append(self.getInport(name).getData())
        out_image = Image(OUT_SIZE, DTYPE)
        out = resize(
            im_data[0].colorLayers[0].data.transpose(1, 0, 2),
            OUT_SIZE_LIST)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            inter_out = img_as_ubyte(out)
        out_image.colorLayers[0].data = inter_out
        self.getOutport("outport").setData(out_image)
        return 1

    if model is None:
        print("No model for synthesis")
        return -1

    cam = inviwopy.app.network.EntryExitPoints.camera

    # Gather the input images and check that they all share the same size.
    im_data = []
    for name in INPORT_LIST:
        im_data.append(self.getInport(name).getData())
    for im in im_data:
        if im_data[0].dimensions != im.dimensions:
            print("Operation is incompatible with images of different size")
            print("Size 1: ", im_data[0].dimensions)
            print("Size 2: ", im.dimensions)
            return -1

    out_image = Image(OUT_SIZE, DTYPE)
    sample_image = Image(SAMPLE_SIZE, DTYPE)

    # Strip the alpha channel and move to (height, width, channel) order.
    im_colour = []
    for idx, name in enumerate(INPORT_LIST):
        im_colour.append(
            im_data[idx].colorLayers[0].data[:, :, :3].transpose(1, 0, 2))

    # Convert each depth buffer to per-pixel disparities using the camera
    # parameters of the current view.
    im_depth = []
    near = cam.nearPlane
    far = cam.farPlane
    baseline = 0.5
    focal_length = cam.projectionMatrix[0][0]
    fov = cam.fov.value
    for idx, name in enumerate(INPORT_LIST):
        im_depth.append(
            conversions.depth_to_pixel_disp(
                im_data[idx].depth.data.transpose(1, 0),
                near=near, far=far, baseline=baseline,
                focal_length=focal_length, fov=fov,
                image_pixel_size=float(im_data[0].dimensions[0])))

    sample = {
        'depth': torch.tensor(im_depth[0], dtype=torch.float32).unsqueeze_(0),
        'colour': torch.tensor(im_colour[0], dtype=torch.float32).unsqueeze_(0),
        'grid_size': GRID_SIZE}

    warped = data_transform.transform_inviwo_to_warped(sample)
    desired_shape = warped['shape']
    im_input = warped['inputs'].unsqueeze_(0)
    if cuda:
        im_input = im_input.cuda()

    # The network predicts a residual over the warped input; add it back
    # and clamp to the valid normalised colour range.
    model.eval()
    output = model(im_input)
    output += im_input
    output = torch.clamp(output, 0.0, 1.0)

    end_time = time.time() - start_time
    print("Grid light field rendered in {:.4f} seconds".format(end_time))

    out_unstack = data_transform.undo_remap(
        output[0], desired_shape, dtype=torch.float32)
    out_colour = cnn_utils.transform_lf_to_torch(out_unstack)

    output_grid = vutils.make_grid(
        out_colour, nrow=8, range=(0, 1), normalize=False,
        padding=2, pad_value=1.0)
    output_grid = resize(
        output_grid.cpu().detach().numpy().transpose(1, 2, 0),
        OUT_SIZE_LIST)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        inter_out = img_as_ubyte(output_grid)

    # Add an alpha channel here; Inviwo expects RGBA uint8 data.
    shape = tuple(OUT_SIZE_LIST) + (4,)
    final_out = np.full(shape, 255, np.uint8)
    final_out[:, :, :3] = inter_out

    # Extract the requested sub-view of the light field for the sample port.
    shape = tuple(SAMPLE_SIZE_LIST) + (4,)
    sample_out = np.full(shape, 255, np.uint8)
    sample_out[:, :, :3] = np.around(
        data_transform.denormalise_lf(out_unstack).cpu().detach().numpy()
    ).astype(np.uint8)[self.getPropertyByIdentifier("sample_num").value]

    out_image.colorLayers[0].data = final_out
    sample_image.colorLayers[0].data = sample_out
    self.getOutport("outport").setData(out_image)
    self.getOutport("sample").setData(sample_image)

    end_time = time.time() - start_time
    print("Overall render time was {:.4f} seconds".format(end_time))
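
# Illustrative sketch (an assumption, not part of the processor above): the
# synthesis step in process() follows a residual pattern, where the network
# predicts a correction that is added back onto its warped input before
# clamping to the normalised colour range. `net` and the function name are
# hypothetical; any torch.nn.Module trained this way would fit.
def _residual_synthesis_sketch(net, im_input):
    net.eval()
    with torch.no_grad():  # inference only; no gradients needed
        output = net(im_input) + im_input
    return torch.clamp(output, 0.0, 1.0)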
def depth_to_disparity(depth_data, metadata, image_pixel_size):
    """Convert a depth buffer to pixel disparities using camera metadata."""
    m = metadata
    return conversions.depth_to_pixel_disp(
        depth_data, m['near'], m['far'], m['baseline'],
        m['focal_length'], m['fov'], image_pixel_size)
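
# Usage sketch (assumed, mirroring the keyword arguments that process() passes
# to conversions.depth_to_pixel_disp). `depth_buffer` and `image` are
# hypothetical placeholders for an Inviwo depth layer and its source image:
#
#     cam = inviwopy.app.network.EntryExitPoints.camera
#     meta = {'near': cam.nearPlane, 'far': cam.farPlane, 'baseline': 0.5,
#             'focal_length': cam.projectionMatrix[0][0], 'fov': cam.fov.value}
#     disparity = depth_to_disparity(depth_buffer.transpose(1, 0), meta,
#                                    float(image.dimensions[0]))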