def _plot_gradient(self):
    """Display the light-dark domain as a grayscale gradient.

    Draws vertical rectangular strips of width ``self._res`` across the
    x range, shading each by the brightness equation from the paper, and
    finally pins the axis limits to the domain bounds.
    """
    xmin, xmax = self._x_range
    ymin, ymax = self._y_range
    # Note that higher brightness has lower brightness value: the
    # quadratic below is minimized (== const) at x == light, which is
    # the brightest spot, and grows toward the domain edges.
    hi_brightness = self._env.const
    lo_brightness = max(
        0.5 * (self._env.light - xmin)**2 + self._env.const,
        0.5 * (self._env.light - xmax)**2 + self._env.const)
    # Plot a bunch of rectangular strips along the x axis
    # Check out: https://stackoverflow.com/questions/10550477
    x = xmin
    verts = []
    colors = []
    while x < xmax:
        # Clamp so the final strip never extends past xmax (previously
        # x + res could overshoot the domain on the last iteration).
        x_next = min(x + self._res, xmax)
        verts.append([(x, ymin), (x_next, ymin),
                      (x_next, ymax), (x, ymax)])
        # compute brightness based on equation in the paper
        brightness = 0.5 * (self._env.light - x)**2 + self._env.const
        # map brightness to a grayscale color (brightest -> 255, i.e.
        # white; darkest -> 0, i.e. black)
        grayscale = int(
            round(
                util.remap(brightness, hi_brightness, lo_brightness, 255,
                           0)))
        grayscale_hex = util.rgb_to_hex((grayscale, grayscale, grayscale))
        colors.append(grayscale_hex)
        x = x_next
    util.plot_polygons(verts, colors, ax=self._ax)
    self._ax.set_xlim(xmin, xmax)
    self._ax.set_ylim(ymin, ymax)
def main(args):
    """Compute per-configuration diffuse bases and generate the file list.

    First averages the RGB renders of every ``trainvali_*`` configuration
    into a single UV-space albedo (normalized to [0, 1] by its maximum),
    then, for every configuration, modulates that albedo by the light
    visibility map to produce diffuse buffers in both UV and camera space.

    Args:
        args: Parsed CLI namespace; reads ``args.data_root``.
    """
    # ------ Compute diffuse bases
    # Average all to get UV albedo
    rgb_sum = None
    for config_dir in tqdm(xm.os.sortglob(args.data_root, 'trainvali_*'),
                           desc="Computing albedo"):
        rgb_path = join(config_dir, 'rgb.png')
        rgb = xm.io.img.load(rgb_path, as_array=True)
        rgb = xm.img.normalize_uint(rgb)
        if rgb_sum is None:
            # Lazily initialized so the accumulator matches the render shape
            rgb_sum = np.zeros_like(rgb)
        rgb_sum += rgb
    # Fail loudly with a clear message if the glob matched nothing;
    # otherwise the division below raises a confusing AttributeError on None
    if rgb_sum is None:
        raise ValueError(
            "No 'trainvali_*' directories found under %s" % args.data_root)
    albedo = rgb_sum / rgb_sum.max()
    for config_dir in tqdm(xm.os.sortglob(args.data_root, '*'),
                           desc="Computing diffuse bases"):
        # Modulate UV albedo with light visibility
        lvis_path = join(config_dir, 'lvis.png')
        lvis = xm.io.img.load(lvis_path, as_array=True)
        lvis = xm.img.normalize_uint(lvis)
        lvis = np.dstack([lvis] * 3)  # replicate to 3 channels to match RGB
        diffuse_uv = albedo * lvis
        diffuse_path = join(config_dir, 'diffuse.png')
        diffuse_uint = xm.io.img.write_arr(diffuse_uv, diffuse_path,
                                           clip=True)
        # Warp the UV-space diffuse buffer back into camera space using the
        # precomputed UV-to-camera mapping
        uv2cam_path = join(config_dir, 'uv2cam.npy')
        uv2cam = np.load(uv2cam_path)
        diffuse_camspc_uint = remap(diffuse_uint, uv2cam)
        diffuse_camspc_path = join(config_dir, 'diffuse_camspc.png')
        xm.io.img.write_img(diffuse_camspc_uint, diffuse_camspc_path)
    # ------ Generate file list
    gen_file_list(args)
def main(args):
    """Render and derive all per-configuration buffers for one cam/light pair.

    Opens the Blender scene, installs the requested camera and point light,
    renders RGB / diffuse / alpha buffers, computes UV<->camera-space
    mappings and light/view cosine maps, remaps every buffer into UV space,
    and dumps the camera, light, and neighbor metadata to ``args.outdir``.

    Args:
        args: Parsed CLI namespace; reads ``scene``, ``cam_json``,
            ``light_json``, ``spp``, ``imh``, ``outdir``, ``uvs``,
            ``cached_uv_unwrap``, ``debug``, ``cam_nn_json``,
            ``light_nn_json``.
    """
    # Open scene
    xm.blender.scene.open_blend(args.scene)
    # Remove existing cameras and lights, if any
    # NOTE(review): 'LAMP' and `o.select` suggest the Blender 2.7x API —
    # confirm against the pinned Blender version
    for o in bpy.data.objects:
        o.select = o.type in ('LAMP', 'CAMERA')
    bpy.ops.object.delete()
    # Load camera and light
    cam = load_json(args.cam_json)
    light = load_json(args.light_json)
    # Add camera and light
    cam_obj = xm.blender.camera.add_camera(xyz=cam['position'],
                                           rot_vec_rad=cam['rotation'],
                                           name=cam['name'],
                                           f=cam['focal_length'],
                                           sensor_width=cam['sensor_width'],
                                           sensor_height=cam['sensor_height'],
                                           clip_start=cam['clip_start'],
                                           clip_end=cam['clip_end'])
    xm.blender.light.add_light_point(xyz=light['position'],
                                     name=light['name'],
                                     size=light['size'])
    # Common rendering settings
    xm.blender.render.easyset(n_samples=args.spp, color_mode='RGB')
    # Image and texture resolution
    # Width is derived from the height to preserve the sensor aspect ratio
    imw = args.imh / cam['sensor_height'] * cam['sensor_width']
    imw = safe_cast_to_int(imw)
    xm.blender.render.easyset(h=args.imh, w=imw)
    # Render full RGB
    # TODO: Render in .exr to avoid saturated pixels (and tone mapping)
    rgb_camspc_f = join(args.outdir, 'rgb_camspc.png')
    xm.blender.render.render(rgb_camspc_f)
    # Drop any alpha channel; keep RGB only
    rgb_camspc = xm.io.img.load(rgb_camspc_f, as_array=True)[:, :, :3]
    # Render diffuse RGB
    obj = bpy.data.objects['object']
    make_diffuse(obj)
    diffuse_camspc_f = join(args.outdir, 'diffuse_camspc.png')
    xm.blender.render.render(diffuse_camspc_f, obj_names=obj.name)
    diffuse_camspc = xm.io.img.load(diffuse_camspc_f,
                                    as_array=True)[:, :, :3]
    # Render alpha
    alpha_f = join(args.outdir, 'alpha.png')
    xm.blender.render.render_alpha(alpha_f, samples=args.spp)
    alpha = xm.io.img.load(alpha_f, as_array=True)
    alpha = xm.img.normalize_uint(alpha)
    # Cast rays through all pixels to the object
    xs, ys = np.meshgrid(range(imw), range(args.imh))
    # (0, 0)
    # +--------> (w, 0)
    # |           x
    # |
    # v y (0, h)
    xys = np.dstack((xs, ys)).reshape(-1, 2)
    ray_tos, x_locs, x_objnames, x_facei, x_normals = \
        xm.blender.camera.backproject_to_3d(
            xys, cam_obj, obj_names=obj.name, world_coords=True)
    # Bundle the per-pixel intersection results for the helpers below
    intersect = {
        'ray_tos': ray_tos,
        'obj_names': x_objnames,
        'face_i': x_facei,
        'locs': x_locs,
        'normals': x_normals
    }
    # Compute mapping between UV and camera space
    uv2cam, cam2uv = calc_bidir_mapping(obj, xys, intersect, args.uvs,
                                        cached_unwrap=args.cached_uv_unwrap)
    # Add a blue channel so the 2-channel mappings can be saved as RGB PNGs
    uv2cam = add_b_ch(uv2cam)
    cam2uv = add_b_ch(cam2uv)
    uv2cam[alpha < 1] = 0  # mask out interpolated values that fall outside
    xm.io.img.write_arr(uv2cam, join(args.outdir, 'uv2cam.png'), clip=True)
    xm.io.img.write_arr(cam2uv, join(args.outdir, 'cam2uv.png'), clip=True)
    # Only the first two channels carry the mapping; the blue channel was
    # padding for visualization
    save_float16_npy(uv2cam[:, :, :2], join(args.outdir, 'uv2cam.npy'))
    save_float16_npy(cam2uv[:, :, :2], join(args.outdir, 'cam2uv.npy'))
    # Compute view and light cosines
    lvis_camspc, cvis_camspc = calc_cosines(cam_obj.location,
                                            light['position'], xys,
                                            intersect, obj.name)
    # Clip to [0, 1] (back-facing cosines are negative) and convert to uint
    lvis_camspc = xm.img.denormalize_float(np.clip(lvis_camspc, 0, 1))
    cvis_camspc = xm.img.denormalize_float(np.clip(cvis_camspc, 0, 1))
    xm.io.img.write_img(cvis_camspc, join(args.outdir, 'cvis_camspc.png'))
    xm.io.img.write_img(lvis_camspc, join(args.outdir, 'lvis_camspc.png'))
    # Remap buffers to UV space
    cvis = remap(cvis_camspc, cam2uv)
    lvis = remap(lvis_camspc, cam2uv)
    diffuse = remap(diffuse_camspc, cam2uv)
    rgb = remap(rgb_camspc, cam2uv)
    xm.io.img.write_img(cvis, join(args.outdir, 'cvis.png'))
    xm.io.img.write_img(lvis, join(args.outdir, 'lvis.png'))
    xm.io.img.write_img(diffuse, join(args.outdir, 'diffuse.png'))
    xm.io.img.write_img(rgb, join(args.outdir, 'rgb.png'))
    if args.debug:
        # Remap it backwards to check if we get back the camera-space buffer
        # TODO: UV wrapped images may have seams/holes due to interpolation
        # errors (fixable by better engineering), but this should be fine
        # because the network will learn to eliminate such artifacts in
        # trying to match the camera-space ground truth
        cvis_camspc_repro = remap(cvis, uv2cam)
        lvis_camspc_repro = remap(lvis, uv2cam)
        diffuse_camspc_repro = remap(diffuse, uv2cam)
        rgb_camspc_repro = remap(rgb, uv2cam)
        xm.io.img.write_img(cvis_camspc_repro, join(args.outdir,
                                                    'cvis_camspc_repro.png'))
        xm.io.img.write_img(lvis_camspc_repro, join(args.outdir,
                                                    'lvis_camspc_repro.png'))
        xm.io.img.write_img(diffuse_camspc_repro,
                            join(args.outdir, 'diffuse_camspc_repro.png'))
        xm.io.img.write_img(rgb_camspc_repro, join(args.outdir,
                                                   'rgb_camspc_repro.png'))
    # Dump camera and light
    copyfile(args.cam_json, join(args.outdir, 'cam.json'))
    copyfile(args.light_json, join(args.outdir, 'light.json'))
    # Dump neighbor information
    cam_nn = load_json(args.cam_nn_json)
    light_nn = load_json(args.light_nn_json)
    cam_name = name_from_json_path(args.cam_json)
    light_name = name_from_json_path(args.light_json)
    nn = {'cam': cam_nn[cam_name], 'light': light_nn[light_name]}
    dump_json(nn, join(args.outdir, 'nn.json'))