def _compile_into_webpage(batch_dirs, out_html, title=None):
    """Compiles per-batch visualization files into one HTML results page.

    Args:
        batch_dirs: Iterable of directories, each holding the visualized
            outputs of one batch ('?_metadata.json' plus sibling images).
        out_html: Path to write the generated HTML page to.
        title: Optional page header; skipped if None.

    Raises:
        ValueError: If no metadata file is found in any batch directory.
    """
    rows, caps, types = [], [], []
    # For each batch (which has just one sample)
    for batch_dir in batch_dirs:
        for metadata_path in sorted(
                glob(join(batch_dir, '?_metadata.json'))):
            # Strip the 'metadata.json' suffix to get the shared prefix of
            # this sample's visualization files
            path_prefix = metadata_path[:-len('metadata.json')]
            metadata = ioutil.read_json(metadata_path)
            metadata = str(metadata)
            row = [
                metadata,
                path_prefix + 'base-vs-pred.apng',
                path_prefix + 'gt-vs-pred.apng',
                path_prefix + 'nn.png']
            rowcaps = [
                "Metadata", "Prediction vs. Diffuse Base",
                "Prediction vs. Ground Truth", "Nearest Neighbor"]
            rowtypes = ['text', 'image', 'image', 'image']
            rows.append(row)
            caps.append(rowcaps)
            types.append(rowtypes)
    # An `assert` is stripped under `python -O`, so raise explicitly to
    # guarantee the empty-input case is always reported
    if not rows:
        raise ValueError("No row")
    # Write HTML
    html = xm.vis.html.HTML()
    if title is not None:
        html.add_header(title)
    img_table = html.add_table()
    for r, rcaps, rtypes in zip(rows, caps, types):
        img_table.add_row(r, rtypes, captions=rcaps)
    html.save(out_html)
def __init__(self, config, mode, **kwargs):
    """Initializes the dataset from the data-status JSON next to data root.

    Args:
        config: Parsed configuration ('DEFAULT' section must provide
            'data_root').
        mode: Dataset mode string, forwarded to the parent class.
        **kwargs: Additional arguments forwarded to the parent class.

    Raises:
        FileNotFoundError: If the data-status JSON is missing.
    """
    self.data_root = config.get('DEFAULT', 'data_root')
    data_status_path = self.data_root.rstrip('/') + '.json'
    # Fail early with an actionable message (matching the sibling dataset
    # class) instead of an opaque I/O error deep inside `read_json()`
    if not exists(data_status_path):
        raise FileNotFoundError(
            ("Data status JSON not found at \n\t%s\nRun "
             "$REPO/data_gen/postproc.py to generate it") % data_status_path)
    self.data_paths = ioutil.read_json(data_status_path)
    # Because paths in JSON are relative, prepend data root directory
    for paths in self.data_paths.values():
        for k, v in paths.items():
            if k != 'complete':
                paths[k] = join(self.data_root, v)
    super().__init__(config, mode, **kwargs)
    # Trigger init. in a main thread before starting multi-threaded work.
    # See http://yaqs/eng/q/6292200559345664 for details
    Image.init()
def __init__(self, config, mode, **kwargs):
    """Initializes the dataset, loading and absolutizing its data paths.

    Args:
        config: Parsed configuration ('DEFAULT' section must provide
            'data_root').
        mode: Dataset mode string, forwarded to the parent class.
        **kwargs: Additional arguments forwarded to the parent class.

    Raises:
        FileNotFoundError: If the data-status JSON is missing.
    """
    self.data_root = config.get('DEFAULT', 'data_root')
    data_status_path = self.data_root.rstrip('/') + '.json'
    if not exists(data_status_path):
        raise FileNotFoundError(
            ("Data status JSON not found at \n\t%s\nRun "
             "$REPO/data_gen/postproc.py to generate it") % data_status_path)
    self.data_paths = ioutil.read_json(data_status_path)
    # The JSON stores paths relative to the data root; make them absolute
    for paths in self.data_paths.values():
        for key in paths:
            if key != 'complete':
                paths[key] = join(self.data_root, paths[key])
    super().__init__(config, mode, **kwargs)
    # Trigger init. in a main thread before starting multi-threaded work.
    # See http://yaqs/eng/q/6292200559345664 for details
    Image.init()
def _compile_into_video(batch_dirs, out_mp4, fps=12):
    """Compiles per-batch prediction images into a single video.

    Batches missing their prediction image are skipped with a warning.

    Args:
        batch_dirs: Iterable of directories, each holding the visualized
            outputs of one batch.
        out_mp4: Path to write the video to.
        fps: Frame rate of the output video.
    """
    frames = {}
    for batch_dir in tqdm(batch_dirs, desc="Compiling visualized batches"):
        for metadata_path in glob(join(batch_dir, '?_metadata.json')):
            path_prefix = metadata_path[:-len('metadata.json')]
            pred_path = path_prefix + 'pred.png'
            if not exists(pred_path):
                # `Logger.warn` is deprecated in favor of `warning`; also
                # defer string formatting to the logging framework
                logger.warning(
                    "Skipping because of missing file:\n\t%s", pred_path)
                continue
            # Metadata
            metadata = ioutil.read_json(metadata_path)
            id_ = metadata['id']
            # Prediction
            pred = xm.io.img.load(pred_path, as_array=True)
            frame = pred
            frames[id_] = frame
    # Make video, ordering frames deterministically by sample ID
    frames_sorted = [frames[k] for k in sorted(frames)]
    ioutil.write_video(frames_sorted, out_mp4, fps=fps)
def _load_data(self, id_):
    """Loads all images and arrays needed for one sample.

    Args:
        id_: Sample identifier; either a string or a string `tf.Tensor`
            (as handed in by a `tf.data` pipeline).

    Returns:
        Tuple of `(id_, base, cvis, lvis, warp, rgb, rgb_camspc, nn_id,
        nn_base, nn_rgb, nn_rgb_camspc)`, with all image arrays cast to
        `float32` and `cvis`/`lvis` expanded to HxWx1.
    """
    # `tf.data` passes IDs as byte-string tensors; decode to a plain str
    if isinstance(id_, tf.Tensor):
        id_ = id_.numpy().decode()
    paths = self.data_paths[id_]
    imh = self.config.getint('DEFAULT', 'imh')
    imw = self.config.getint('DEFAULT', 'imw')
    # Load images
    base = xm.io.img.load(paths['diffuse'], as_array=True)[:, :, :3]
    cvis = xm.io.img.load(paths['cvis'], as_array=True)
    lvis = xm.io.img.load(paths['lvis'], as_array=True)
    warp = ioutil.read_npy(paths['uv2cam'])
    # No ground truth at test time, so return zero placeholders of the
    # right shape instead
    if self.mode == 'test':
        rgb = np.zeros_like(base)  # placeholders
        rgb_camspc = np.zeros((imh, imw, 3))
    else:
        rgb = xm.io.img.load(paths['rgb'], as_array=True)[:, :, :3]
        rgb_camspc = xm.io.img.load(
            paths['rgb_camspc'], as_array=True)[:, :, :3]
    # Normalize to [0, 1]
    base = xm.img.normalize_uint(base)
    cvis = xm.img.normalize_uint(cvis)
    lvis = xm.img.normalize_uint(lvis)
    if self.mode != 'test':
        # Placeholders are already zeros in [0, 1]; skip them
        rgb = xm.img.normalize_uint(rgb)
        rgb_camspc = xm.img.normalize_uint(rgb_camspc)
    # Resize images
    uvh = self.config.getint('DEFAULT', 'uvh')
    base = xm.img.resize(base, new_h=uvh)
    cvis = xm.img.resize(cvis, new_h=uvh)
    lvis = xm.img.resize(lvis, new_h=uvh)
    rgb = xm.img.resize(rgb, new_h=uvh)
    rgb_camspc = xm.img.resize(rgb_camspc, new_h=imh, new_w=imw)
    # NOTE: We didn't resize warp because this introduces artifacts --
    # always warp first and then resize
    # Neighbor diffuse base and full
    nn = ioutil.read_json(paths['nn'])
    nn_id = self._get_nn_id(nn)
    if nn_id is None:
        nn_id = 'incomplete-data_{cam}_{light}'.format(**nn)
        # NOTE: When neighbor is missing, simply return black placeholders
        nn_base = np.zeros_like(base)
        nn_rgb = np.zeros_like(rgb)
        nn_rgb_camspc = np.zeros_like(rgb_camspc)
    else:
        nn_base = xm.io.img.load(self.data_paths[nn_id]['diffuse'],
                                 as_array=True)[:, :, :3]
        nn_rgb = xm.io.img.load(self.data_paths[nn_id]['rgb'],
                                as_array=True)[:, :, :3]
        nn_rgb_camspc = xm.io.img.load(
            self.data_paths[nn_id]['rgb_camspc'], as_array=True)[:, :, :3]
        # NOTE(review): already sliced to 3 channels just above, so this
        # re-slice is a no-op
        nn_rgb_camspc = nn_rgb_camspc[:, :, :3]  # discards alpha
        nn_base = xm.img.normalize_uint(nn_base)
        nn_rgb = xm.img.normalize_uint(nn_rgb)
        nn_rgb_camspc = xm.img.normalize_uint(nn_rgb_camspc)
        nn_base = xm.img.resize(nn_base, new_h=uvh)
        nn_rgb = xm.img.resize(nn_rgb, new_h=uvh)
        nn_rgb_camspc = xm.img.resize(nn_rgb_camspc, new_h=imh, new_w=imw)
    # Return everything as float32 for downstream TF consumption
    base = base.astype(np.float32)
    cvis = cvis.astype(np.float32)[:, :, None]  # HxWx1
    lvis = lvis.astype(np.float32)[:, :, None]
    warp = warp.astype(np.float32)
    rgb = rgb.astype(np.float32)
    rgb_camspc = rgb_camspc.astype(np.float32)
    nn_base = nn_base.astype(np.float32)
    nn_rgb = nn_rgb.astype(np.float32)
    nn_rgb_camspc = nn_rgb_camspc.astype(np.float32)
    return \
        id_, base, cvis, lvis, warp, rgb, rgb_camspc, nn_id, nn_base, \
        nn_rgb, nn_rgb_camspc