def main():
    MESH_ROOT = './datasets/meshes/clean_serialized'
    CAR_ROSTER = [get_fn(path) for path in get_child_paths(MESH_ROOT)]

    POS_GBUF_ROOT = './datasets/gbuffers/position'
    NOR_GBUF_ROOT = './datasets/gbuffers/normal'
    MSK_GBUF_ROOT = './datasets/gbuffers/mask'
    HEIGHT_DIRT_ROOT = './datasets/textures/height'
    CURVE_DIRT_ROOT = './datasets/textures/curve'

    for name in CAR_ROSTER:
        print(f'Generating dirt for {name}')
        # Load the position, normal, and mask g-buffers for this car
        pos = imread(os.path.join(POS_GBUF_ROOT, f'{name}.png'))
        nrm = imread(os.path.join(NOR_GBUF_ROOT, f'{name}.png'))
        msk = imread(os.path.join(MSK_GBUF_ROOT, f'{name}.png'))

        # Generate and save the height-based dirt texture
        h_dirt = height_dirt(pos, nrm, msk)
        imwrite(h_dirt, os.path.join(HEIGHT_DIRT_ROOT, f'{name}.png'))

        # Generate and save the curvature-based dirt texture
        c_dirt = curve_dirt(pos, nrm, msk)
        imwrite(c_dirt, os.path.join(CURVE_DIRT_ROOT, f'{name}.png'))
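
# The path helpers used throughout these scripts (`get_child_paths`,
# `get_fn`) are project utilities whose definitions are not shown in this
# file. A minimal sketch of what they are assumed to do: list a directory's
# files and strip a path down to its bare filename.
def get_child_paths(root, ext=None):
    # All file paths directly under `root`, optionally filtered by extension.
    paths = [os.path.join(root, fn) for fn in sorted(os.listdir(root))]
    if ext is not None:
        paths = [p for p in paths if p.endswith(f'.{ext}')]
    return paths

def get_fn(path):
    # Filename without directory or extension, e.g. 'a/b/car.obj' -> 'car'.
    return os.path.splitext(os.path.basename(path))[0]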
scene_dict = {
    'test': {
        'cam_translation': [0.0, -0.75, 0.0],
        'cam_distance': 7.0,
        'cam_fov': [45.0],
        'cam_resolution': [512, 512],
        'geometry_path': './datasets/meshes/serialized/octavia_clean.pth',
        'tex_diffuse_color': [0.8, 0.8, 0.8],
        'tex_specular_color': [0.8, 0.8, 0.8],
        'envmap_path': './datasets/envmaps/one/sunsky.exr',
        'envmap_signal_mean': 0.5,
        'envmap_rotation': 0.0,
        'opt_num_samples': [200, 1],
        'opt_max_bounces': 2,
        'opt_channels_str': ['radiance'],
        'opt_render_seed': 0,
    }
}

render_config = RenderConfig()
render_layer = RenderLayer(render_config, device)

# Load the input texture; `path` must point at a texture image on disk and
# is not defined in this snippet.
img = torch.tensor(imread(path), dtype=torch.float32, device=device)

#for fpath in get_child_paths(meshes_path):
#    print(f'Rendering {fpath}')
#    render_config.data[render_config.cfg_id]['geo_mesh_path'] = fpath
#    name = get_fn(fpath)
#    out = render_layer(opaque)
#    imwrite(out, f'debug/new_mesh_qual/{name}.png')

render_config.set_scene(scene_dict['test'])
out = render_layer(img)
imwrite(out, 'debug/test_render_out.png')
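
# `RenderConfig` and `RenderLayer` are project classes not defined in this
# snippet. The commented-out debug loop above hints at an older
# `data`/`cfg_id` interface; the sketch below covers only the calls actually
# exercised here and in the dataset script later in this file: a config that
# stores the active scene dict and exposes its entries by key.
class RenderConfig:
    def __init__(self):
        self.scene = None

    def set_scene(self, scene):
        # `scene` is one of the flat dicts above, e.g. scene_dict['test'].
        self.scene = scene

    def __call__(self, key):
        # Lookup used elsewhere as config('geometry_path').
        return self.scene[key]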
ENVMAPS_ROOT = './datasets/envmaps/'
COEFFS_DIR = 'sh_coeffs/'
RASTERS_DIR = 'rasters/'

resolution = (256, 128)
device = torch.device('cuda:0')

def deringing(coeffs, window):
    deringed_coeffs = torch.zeros_like(coeffs)
    deringed_coeffs[:, 0] += coeffs[:, 0]
    deringed_coeffs[:, 1:1 + 3] += coeffs[:, 1:1 + 3] * math.pow(
        math.sin(math.pi * 1.0 / window) / (math.pi * 1.0 / window), 4.0)
    deringed_coeffs[:, 4:4 + 5] += coeffs[:, 4:4 + 5] * math.pow(
        math.sin(math.pi * 2.0 / window) / (math.pi * 2.0 / window), 4.0)
    #deringed_coeffs[:, 9:9 + 7] += coeffs[:, 9:9 + 7] * math.pow(
    #    math.sin(math.pi * 3.0 / window) / (math.pi * 3.0 / window), 4.0)
    return deringed_coeffs

fn_list = os.listdir(os.path.join(ENVMAPS_ROOT, COEFFS_DIR))
for fn in fn_list:
    coeffs = torch.tensor(
        np.load(os.path.join(ENVMAPS_ROOT, COEFFS_DIR, fn)).transpose(),
        device=device)
    deringed_coeffs = deringing(coeffs, 6.0)
    envmap = pyredner.SH_reconstruct(deringed_coeffs, resolution)
    imwrite(
        envmap.cpu(),
        os.path.join(ENVMAPS_ROOT, RASTERS_DIR,
                     '{}.exr'.format(fn.split('.')[0])))
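
# The hard-coded slices in `deringing` apply a sinc^4 (Lanczos-style)
# window: spherical-harmonic band l (with 2l + 1 coefficients starting at
# index l**2) is scaled by (sin(pi*l/w) / (pi*l/w))**4, damping high bands
# to suppress ringing in the reconstructed environment map. A sketch of the
# same idea generalized to any band count, assuming `coeffs` has shape
# [channels, num_bands**2]:
def deringing_general(coeffs, window):
    num_bands = int(math.sqrt(coeffs.shape[1]))
    deringed = torch.zeros_like(coeffs)
    deringed[:, 0] = coeffs[:, 0]  # band 0 is left untouched
    for l in range(1, num_bands):
        x = math.pi * l / window
        sigma = math.pow(math.sin(x) / x, 4.0)
        deringed[:, l**2:(l + 1)**2] = coeffs[:, l**2:(l + 1)**2] * sigma
    return deringed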
model = DeepLabModel(model_path)

dataroot = './datasets/tiam/'
resolution = 512

raw_dir = os.path.join(dataroot, 'raw')
img_dir = os.path.join(dataroot, 'img')
mask_dir = os.path.join(dataroot, 'mask')
if not os.path.exists(img_dir):
    os.makedirs(img_dir)
if not os.path.exists(mask_dir):
    os.makedirs(mask_dir)

resize = Resize(resolution)

fn_list = os.listdir(raw_dir)
for fn in fn_list:
    # Fetch file metadata
    fn = fn.split('.')[0]
    print('Processing: {}'.format(fn), end='\t')
    raw_path = os.path.join(raw_dir, '{}.JPG'.format(fn))
    img_path = os.path.join(img_dir, '{}.png'.format(fn))
    mask_path = os.path.join(mask_dir, '{}.png'.format(fn))

    # Resize to the working resolution and fix the orientation
    img = resize(imread(raw_path)) * 255.0
    img = np.rot90(img, -1)

    # Run segmentation, then save the image and its mask
    mask = model.run(img)
    print('Saving mask')
    imwrite(img / 255.0, img_path)
    imwrite(mask, mask_path)
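
# `Resize` is a small callable transform constructed once and reused in the
# loop above; its exact behavior is not shown in this file. A minimal
# sketch, under the assumption that it scales the shorter image side to
# `resolution` while preserving aspect ratio:
import skimage.transform

class Resize:
    def __init__(self, resolution):
        self.resolution = resolution

    def __call__(self, img):
        h, w = img.shape[:2]
        scale = self.resolution / min(h, w)
        new_shape = (int(round(h * scale)), int(round(w * scale)))
        # skimage returns a float image in [0, 1], which matches the
        # `* 255.0` rescale at the call site.
        return skimage.transform.resize(img, new_shape)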
BLENDER = '/home/budmonde/opt/blender/blender'
OBJ_DIR = './datasets/meshes/learn'
OUT_PATH = './datasets/textures/bake'

if not os.path.exists(OUT_PATH):
    os.makedirs(OUT_PATH)

sampler = RGBFileSamplerFactory('./datasets/distributions/diffuse.txt')

data = dict()
for in_path in get_child_paths(OBJ_DIR, ext='obj'):
    mesh_name = get_fn(in_path)
    out_path = os.path.join(OUT_PATH, f'{mesh_name}.png')

    # Sample an albedo color for this mesh and bake dirt in Blender
    data[mesh_name] = sampler()
    subprocess.run([
        BLENDER, '--background', '--python', 'blender/add_dirt_blender.py',
        '--',
        '--input_path', in_path,
        '--output_path', out_path,
        '--albedo_r', str(data[mesh_name][0]),
        '--albedo_g', str(data[mesh_name][1]),
        '--albedo_b', str(data[mesh_name][2]),
    ], check=True)

    # Post process output
    image = imread(out_path)
    image = rotate(image, 180)
    imwrite(image, out_path)

with open(os.path.join(OUT_PATH, 'data.json'), 'w') as f:
    json.dump(data, f)
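
# `RGBFileSamplerFactory` is a project helper not defined in this file.
# Judging from its use above, and assuming the diffuse reflectance file
# holds one whitespace-separated RGB triple per line, a minimal sketch: it
# returns a zero-argument sampler that draws a random triple from the file.
import random

def RGBFileSamplerFactory(path):
    with open(path) as f:
        colors = [[float(v) for v in line.split()]
                  for line in f if line.strip()]

    def sample():
        return random.choice(colors)

    return sample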
label_fpath = os.path.join(LABEL_DIR, 'label_{}.png'.format(fn))

# Open images. Open label using PIL because image is encoded in uint8
img = imread(img_fpath)
label = np.array(Image.open(label_fpath))

# Isolate masks
label[label != 250] = 0
label[label == 250] = 1

# Clean up the mask
label = label.astype(bool)
threshold = 70000
skimage.morphology.remove_small_objects(
    label, min_size=threshold, in_place=True)
skimage.morphology.remove_small_holes(
    label, area_threshold=threshold, in_place=True)
label = label.astype('uint8')

# Normalize the mask
out = center_and_scale(img, label)

# Reject if there was an issue with the image
if out is None:
    print('Rejecting: {}'.format(fn))
    continue

# Write to disk
print('Writing: {}'.format(fn))
imwrite(out[0], os.path.join(IMG_DIR, '{}.png'.format(fn)))
imwrite(out[1], os.path.join(MASK_DIR, 'label_{}.png'.format(fn)))
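
# `center_and_scale` is a project helper that is not shown here. From the
# call site it takes the image and its binary mask and returns an
# (image, mask) pair normalized so the object is centered at a consistent
# scale, or None when the mask is unusable. A rough sketch of that
# contract; the crop/scale policy and default sizes are assumptions:
import skimage.transform

def center_and_scale(img, label, out_size=512, fill=0.6):
    ys, xs = np.nonzero(label)
    if len(ys) == 0:
        return None  # empty mask; caller rejects this sample
    # Crop to the mask's bounding box
    y0, y1, x0, x1 = ys.min(), ys.max() + 1, xs.min(), xs.max() + 1
    img, label = img[y0:y1, x0:x1], label[y0:y1, x0:x1]
    # Scale so the object fills `fill` of the output side length
    scale = fill * out_size / max(img.shape[:2])
    new_hw = (int(img.shape[0] * scale), int(img.shape[1] * scale))
    img = skimage.transform.resize(img, new_hw)
    label = skimage.transform.resize(label.astype(float), new_hw) > 0.5
    # Pad symmetrically so the object sits at the image center
    pad_y, pad_x = out_size - new_hw[0], out_size - new_hw[1]
    pads = ((pad_y // 2, pad_y - pad_y // 2),
            (pad_x // 2, pad_x - pad_x // 2))
    img = np.pad(img, pads + ((0, 0),) * (img.ndim - 2))
    label = np.pad(label, pads)
    return img, label.astype('uint8')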
def generate_poses(model_path, output_path):
    # Init logger
    log = dict()

    # Load renderer configs
    material_map, mesh_list, light_map = pyredner.load_obj(model_path)
    material_id_map = {}
    materials = []
    count = 0
    for key, value in material_map.items():
        material_id_map[key] = count
        count += 1
        materials.append(value)

    shapes = []
    for mtl_name, mesh in mesh_list:
        shapes.append(
            pyredner.Shape(vertices=mesh.vertices,
                           indices=mesh.indices,
                           uvs=mesh.uvs,
                           normals=mesh.normals,
                           material_id=material_id_map[mtl_name]))

    envmap = pyredner.EnvironmentMap(
        torch.tensor(imread('./datasets/envmaps/one/sunsky.exr'),
                     dtype=torch.float32,
                     device=pyredner.get_device()))

    # Object pose parameters
    euler_angles = [0.0, 0.0, 0.0]
    translation = [0.0, -0.75, 0.0]
    up = [0.0, 1.0, 0.0]
    distance = 7.0

    # Setup base scene to modify during iterations
    cam_params = camera_parameters(euler_angles, translation, distance, up)
    camera = pyredner.Camera(
        position=torch.tensor(cam_params[0], dtype=torch.float32),
        look_at=torch.tensor(cam_params[1], dtype=torch.float32),
        up=torch.tensor(cam_params[2], dtype=torch.float32),
        fov=torch.tensor([45.0]),
        clip_near=1e-2,
        resolution=(opt.resolution, opt.resolution),
        fisheye=False)
    scene = pyredner.Scene(
        camera, shapes, materials, area_lights=[], envmap=envmap)

    # Generate alphamasks
    for i in range(opt.num_elev):
        # Set elevation angle
        elev_pc = i / opt.num_elev
        elevation = opt.max_elev * elev_pc + opt.min_elev * (1 - elev_pc)
        euler_angles[1] = elevation

        # Calculate number of azimuthal iterations
        num_azimuth = int(opt.num_elev * math.sin(math.pi / 2 - elevation))
        for j in range(num_azimuth):
            # Set azimuthal angle
            azimuth_pc = j / num_azimuth
            azimuth = math.pi * 2 * azimuth_pc
            euler_angles[0] = azimuth

            print('Params: Elevation - {:.4f}\tAzimuth - {:.4f}'.format(
                elevation, azimuth))

            # Set Camera params
            cam_params = camera_parameters(
                euler_angles, translation, distance, up)

            # Update scene params
            scene.camera = pyredner.Camera(
                position=torch.tensor(cam_params[0], dtype=torch.float32),
                look_at=torch.tensor(cam_params[1], dtype=torch.float32),
                up=torch.tensor(cam_params[2], dtype=torch.float32),
                fov=torch.tensor([45.0]),
                clip_near=1e-2,
                resolution=(opt.resolution, opt.resolution),
                fisheye=False)
            args = pyredner.RenderFunction.serialize_scene(
                scene=scene,
                num_samples=1,
                max_bounces=1,
                channels=[redner.channels.alpha])
            out = pyredner.RenderFunction.apply(1, *args)

            fn = gen_hash(6)
            imwrite(out, os.path.join(output_path, '{}.png'.format(fn)))
            log[fn] = {'elevation': elevation, 'azimuth': azimuth}
    return log
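
# `camera_parameters` is a project helper not defined in this file. From
# its call sites it maps (euler_angles, translation, distance, up) to a
# (position, look_at, up) triple for the camera. A sketch under the
# assumption that euler_angles[0] is azimuth and euler_angles[1] is
# elevation on a sphere of radius `distance` around the translated object:
def camera_parameters(euler_angles, translation, distance, up):
    azimuth, elevation = euler_angles[0], euler_angles[1]
    look_at = list(translation)  # orbit the object's center
    position = [
        look_at[0] + distance * math.cos(elevation) * math.sin(azimuth),
        look_at[1] + distance * math.sin(elevation),
        look_at[2] + distance * math.cos(elevation) * math.cos(azimuth),
    ]
    return position, look_at, up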
def main():
    # Load arguments
    parser = argparse.ArgumentParser()

    # Scene args
    parser.add_argument('--geometry_path', type=str,
                        default='./datasets/meshes/clean_serialized')
    parser.add_argument('--envmaps_path', type=str,
                        default='./datasets/envmaps/one')
    parser.add_argument('--diffuse_refl_path', type=str,
                        default='./datasets/distributions/diffuse.txt')
    parser.add_argument('--textures_path', type=str,
                        default='./datasets/textures/curve')
    parser.add_argument('--texture_size', type=int, default=256)

    # Output args
    parser.add_argument('--root_path', type=str,
                        default='./datasets/renders/')
    parser.add_argument('--num_imgs', type=int, default=1000)
    parser.add_argument('--label', type=str, default='debug')

    # Misc
    parser.add_argument('--gpu_id', type=int, default=0)
    opt = parser.parse_args()

    # Create Output directory
    now = datetime.now()
    subdir = f'{opt.label}_{now.month}-{now.day}-{now.hour}-{now.minute}'
    out_path = os.path.join(opt.root_path, subdir)
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    # Load samplers
    sampler = ConfigSampler({
        'cam_rotation': HemisphereSamplerFactory(
            [0.0, 0.5], [0.0, 0.0], [0.0, 0.0]),
        'cam_translation': BoxSamplerFactory(
            [-0.1, 0.1], [-0.76, -0.74], [-0.1, 0.1]),
        'cam_distance': ConstantSamplerFactory(7.0),
        'cam_fov': ConstantSamplerFactory([45.0]),
        'cam_resolution': ConstantSamplerFactory([256, 256]),
        'geometry_path': PathSamplerFactory(opt.geometry_path, ext='pth'),
        'tex_diffuse_color': RGBFileSamplerFactory(opt.diffuse_refl_path),
        'tex_specular_color': ConstantSamplerFactory([0.8, 0.8, 0.8]),
        'envmap_path': PathSamplerFactory(opt.envmaps_path, ext='exr'),
        'envmap_signal_mean': ConstantSamplerFactory(0.5),
        'envmap_rotation': ConstantSamplerFactory(0.0),
        'opt_num_samples': ConstantSamplerFactory((200, 1)),
        'opt_max_bounces': ConstantSamplerFactory(2),
        'opt_channels_str': ConstantSamplerFactory(['radiance', 'alpha']),
        'opt_render_seed': RandIntSamplerFactory(0, 1e6),
    })

    # Init renderer
    device = torch.device(
        f'cuda:{opt.gpu_id}' if opt.gpu_id != -1 else 'cpu')
    config = RenderConfig()
    renderer = Render(config, device)

    log = dict()
    for i in range(opt.num_imgs):
        # Generate render id and scene configs; re-draw on hash collision
        key = gen_hash(6)
        while key in log.keys():
            key = gen_hash(6)
        scene = sampler.generate()
        log[key] = scene
        config.set_scene(scene)

        # Set texture for rendering
        mesh_name = get_fn(config('geometry_path'))
        texture = imread(os.path.join(opt.textures_path, f'{mesh_name}.png'))
        texture = resize(texture, (opt.texture_size, opt.texture_size))
        texture = torch.tensor(texture, dtype=torch.float32, device=device)

        # Time Render operation
        iter_start_time = time.time()
        out = renderer(texture)
        render_time = time.time() - iter_start_time
        print(f'Generated Image: #\t{i} -- {key} in {render_time}')

        # Split the RGBA output into the image and its alpha mask
        imwrite(out[..., :3], os.path.join(out_path, 'img', f'{key}.png'))
        imwrite(out[..., 3:4], os.path.join(out_path, 'mask', f'{key}.png'))

    with open(os.path.join(out_path, 'data.json'), 'w') as metafile:
        json.dump(log, metafile)
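
# `gen_hash` is a small project utility (not shown here) that produces a
# short random id used as a filename and log key. A minimal sketch,
# assuming it draws `length` random lowercase hex characters:
import random
import string

def gen_hash(length=6):
    return ''.join(random.choice(string.hexdigits.lower())
                   for _ in range(length))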