def process(self):
    """Compute the per-pixel difference of two input images."""
    im_data = []
    for name in INPORT_LIST:
        im_data.append(self.getInport(name).getData())
    if len(im_data) < 2:
        print("Not enough input images")
        print("Got {} input images, expected {}".format(len(im_data), 2))
        return -1
    if im_data[0].dimensions != im_data[1].dimensions:
        print("Operation is incompatible with images of different sizes")
        print("Size 1: ", im_data[0].dimensions)
        print("Size 2: ", im_data[1].dimensions)
        return -1
    out_image = Image(im_data[0].dimensions,
                      inviwopy.data.formats.DataVec4UINT8)
    # Difference of the colour layers (RGBA, uint8).
    im_colour = []
    for idx, name in enumerate(INPORT_LIST):
        im_colour.append(im_data[idx].colorLayers[0].data)
    out_colour = get_diff_image(im_colour[0], im_colour[1])
    out_image.colorLayers[0].data = out_colour
    # Difference of the depth layers (float32).
    im_depth = []
    for idx, name in enumerate(INPORT_LIST):
        im_depth.append(im_data[idx].depth.data)
    out_depth = get_diff_image(im_depth[0], im_depth[1], np.float32)
    out_image.depth.data = out_depth
    self.getOutport("outport").setData(out_image)
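
# --- Hedged sketch (not from the original source) -------------------------
# get_diff_image is called above but not defined in this section. The sketch
# below shows one plausible implementation, assuming it returns the
# per-pixel absolute difference of two equally shaped arrays cast to the
# requested dtype; the name and signature are taken from the call sites,
# the body is an assumption.
import numpy as np


def get_diff_image(im_one, im_two, dtype=np.uint8):
    """Per-pixel absolute difference of two images (assumed behaviour)."""
    # Subtract in float so unsigned inputs cannot wrap around, then cast
    # back to the requested output dtype.
    diff = np.abs(im_one.astype(np.float64) - im_two.astype(np.float64))
    return diff.astype(dtype)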
def process(self): """Perform the model warping and output an image grid""" if self.getPropertyByIdentifier("off").value: print("Image warping is currently turned off") return 1 start_time = time.time() if self.getPropertyByIdentifier("display_input").value: im_data = [] for name in INPORT_LIST: im_data.append(self.getInport(name).getData()) out_image = Image(OUT_SIZE, DTYPE) out = resize( im_data[0].colorLayers[0].data.transpose(1, 0, 2), OUT_SIZE_LIST) with warnings.catch_warnings(): warnings.simplefilter("ignore") inter_out = img_as_ubyte(out) out_image.colorLayers[0].data = inter_out self.getOutport("outport").setData(out_image) return 1 if model is None: print("No model for synthesis") return -1 cam = inviwopy.app.network.EntryExitPoints.camera im_data = [] for name in INPORT_LIST: im_data.append(self.getInport(name).getData()) for im in im_data: if not (im_data[0].dimensions == im.dimensions): print("Operation is incompatible with images of different size") print("Size 1: ", im_data[0].dimensions) print("Size 2: ", im.dimensions) return -1 out_image = Image(OUT_SIZE, DTYPE) sample_image = Image(SAMPLE_SIZE, DTYPE) im_colour = [] for idx, name in enumerate(INPORT_LIST): im_colour.append(im_data[idx].colorLayers[0].data[:, :, :3].transpose(1, 0, 2)) im_depth = [] near = cam.nearPlane far = cam.farPlane baseline = 0.5 focal_length = cam.projectionMatrix[0][0] fov = cam.fov.value for idx, name in enumerate(INPORT_LIST): im_depth.append( conversions.depth_to_pixel_disp( im_data[idx].depth.data.transpose(1, 0), near=near, far=far, baseline=baseline, focal_length=focal_length, fov=fov, image_pixel_size=float(im_data[0].dimensions[0])) ) sample = { 'depth': torch.tensor(im_depth[0], dtype=torch.float32).unsqueeze_(0), 'colour': torch.tensor(im_colour[0], dtype=torch.float32).unsqueeze_(0), 'grid_size': GRID_SIZE} warped = data_transform.transform_inviwo_to_warped(sample) desired_shape = warped['shape'] im_input = warped['inputs'].unsqueeze_(0) if cuda: im_input = im_input.cuda() model.eval() output = model(im_input) output += im_input output = torch.clamp(output, 0.0, 1.0) end_time = time.time() - start_time print("Grid light field rendered in {:4f}".format(end_time)) out_unstack = data_transform.undo_remap( output[0], desired_shape, dtype=torch.float32) out_colour = cnn_utils.transform_lf_to_torch( out_unstack ) output_grid = vutils.make_grid( out_colour, nrow=8, range=(0, 1), normalize=False, padding=2, pad_value=1.0) output_grid = resize( output_grid.cpu().detach().numpy().transpose(1, 2, 0), OUT_SIZE_LIST) with warnings.catch_warnings(): warnings.simplefilter("ignore") inter_out = img_as_ubyte(output_grid) #inter_out = denormalise_lf(output_grid) #inter_out = inter_out.cpu().detach().numpy().astype(np.uint8).transpose(1, 2, 0) # Add an alpha channel here shape = tuple(OUT_SIZE_LIST) + (4,) final_out = np.full(shape, 255, np.uint8) final_out[:, :, :3] = inter_out shape = tuple(SAMPLE_SIZE_LIST) + (4,) sample_out = np.full(shape, 255, np.uint8) sample_out[:, :, :3] = np.around( data_transform.denormalise_lf( out_unstack).cpu().detach().numpy() ).astype(np.uint8)[self.getPropertyByIdentifier("sample_num").value] # Inviwo expects a uint8 here out_image.colorLayers[0].data = final_out sample_image.colorLayers[0].data = sample_out self.getOutport("outport").setData(out_image) self.getOutport("sample").setData(sample_image) end_time = time.time() - start_time print("Overall render time was {:4f}".format(end_time))