def register_shape(app, path):
    # Import the 3DS file into a temporary Blender scene and clean up the mesh.
    with suppress_stdout():
        scene = brender.Scene(app, shape=(1000, 1000))
        mesh = brender.Mesh.from_3ds(scene, path)
        mesh.remove_doubles()
        mesh.make_normals_consistent()
        mesh.enable_smooth_shading()
        mesh.unwrap_uv()

    with database.session_scope() as sess:
        shape = Shape(source=args.source_name,
                      source_id=path.name,
                      category=args.category)
        sess.add(shape)
        sess.flush()

        shape_dir = Path(config.BLOB_ROOT, 'shapes', str(shape.id))
        bpy.ops.export_scene.obj(filepath=str(TMP_OBJ_PATH))

        try:
            shape.models_dir.mkdir(parents=True)
            shutil.copy(str(TMP_OBJ_PATH), str(shape.obj_path))
            shutil.copy(str(TMP_MTL_PATH), str(shape.mtl_path))
        except Exception:
            # Clean up partially copied files before re-raising.
            shape.obj_path.unlink()
            shape.mtl_path.unlink()
            shape_dir.rmdir()
            raise

        sess.commit()


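# A minimal, hypothetical driver for register_shape above, assuming the same
# argparse namespace `args` used inside it also carries a `directory` field
# pointing at a folder of *.3ds files. Not part of the original pipeline; it
# only illustrates the intended call pattern, reusing the Brender
# initialization seen in the main() functions below.
def register_all_shapes():
    app = brender.Brender()
    app.init()
    for path in sorted(Path(args.directory).glob('*.3ds')):
        tqdm.write(f'Registering {path.name}')
        register_shape(app, path)

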
def main():
    with session_scope() as sess:
        materials = (sess.query(models.Material)
                     .filter_by(type=models.MaterialType.MDL)
                     .order_by(models.Material.id.asc())
                     .all())

        app = brender.Brender()
        app.init(do_reset=False)
        bpy.ops.wm.open_mainfile(filepath=str(scene_path))

        # The preview .blend file contains one scene for solid materials and
        # one for cloth-like materials, sharing a single camera.
        solid_scene = Scene(app, (1000, 1000),
                            bscene=bpy.data.scenes['SolidMaterialScene'],
                            aa_samples=96)
        cloth_scene = Scene(app, (1000, 1000), aa_samples=96,
                            bscene=bpy.data.scenes['ClothMaterialScene'])
        camera = Camera(bpy.data.objects['Camera'])
        solid_scene.set_active_camera(camera)
        cloth_scene.set_active_camera(camera)

        pbar = tqdm(materials)
        for material in pbar:
            pbar.set_description(material.name)

            if material.data_exists('previews/bmps.png'):
                continue

            if material.substance in {'plastic', 'metal', 'wood', 'polished'}:
                scene = solid_scene
                mesh = Mesh(bpy.data.objects['SolidModel'], name='SolidModel')
            elif material.substance in {'fabric', 'leather'}:
                scene = cloth_scene
                mesh = Mesh(bpy.data.objects['ClothModel'], name='ClothModel')
            else:
                # Fall back to the solid scene for unknown substances.
                scene = solid_scene
                mesh = Mesh(bpy.data.objects['SolidModel'], name='SolidModel')
                print(f'Unknown material substance {material.substance}')

            bpy.context.screen.scene = scene.bobj

            uv_ref_scale = 2 ** (material.default_scale - 4)
            bmat = material_to_brender(material, uv_ref_scale=uv_ref_scale)
            brender.mesh.set_material(mesh.bobj, bmat)
            if bmat.has_uvs:
                mesh.compute_uv_density(base_size=12.0)

            with suppress_stdout():
                rend = scene.render_to_array(format='exr')
            rend_srgb = to_8bit(linear_to_srgb(rend))

            vis.image(rend_srgb.transpose((2, 0, 1)), win='rendering',
                      opts={'title': material.name})

            material.save_data('previews/bmps.exr', rend)
            material.save_data('previews/bmps.png', rend_srgb)


def add_pair_mesh(scene, pair: ExemplarShapePair, pair_dict, mat_by_id):
    segment_dict = pair_dict['segments']
    rk_mesh, _ = pair.shape.load()

    with suppress_stdout():
        mesh = Mesh.from_obj(scene, pair.shape.resized_obj_path,
                             name=str(pair.id))
        mesh.resize(1.0, axis=2)
        mesh.rotate(math.pi, (0, 0, 1))
        mesh.make_normals_consistent()
        mesh.enable_smooth_shading()

    if args.type == 'inferred':
        mesh_materials = mesh.get_materials()
        bmats = []
        for seg_id, seg_name in enumerate(rk_mesh.materials):
            if str(seg_id) not in segment_dict:
                continue
            mat_id = int(segment_dict[str(seg_id)]['material'][0]['id'])
            material = mat_by_id[mat_id]
            uv_ref_scale = 2 ** (material.default_scale - 3)

            # Activate only the current material.
            for bobj in mesh_materials:
                if bobj.name.startswith(seg_name):
                    tqdm.write(
                        f'[Pair {pair.id}] Setting segment {seg_id} ({seg_name}) '
                        f'to material {material.name}, bobj_name={bobj.name}')
                    bmat = loader.material_to_brender(
                        material, bobj=bobj, uv_ref_scale=uv_ref_scale)
                    bmats.append(bmat)

        # This needs to come after the materials are initialized.
        tqdm.write('Computing UV density...')
        mesh.compute_uv_density()
    elif args.type == 'none':
        mesh.set_material(
            BlinnPhongMaterial(diffuse_albedo=(0.1, 0.28, 0.8),
                               specular_albedo=(0.04, 0.04, 0.04),
                               roughness=0.1))

    return mesh


def main():
    with session_scope() as sess:
        materials = (sess.query(models.Material)
                     .order_by(models.Material.id.asc())
                     .all())

        # Initialize Brender and Scene.
        app = brender.Brender()
        app.init()
        scene = brender.Scene(app, shape=_REND_SHAPE, aa_samples=196)
        scene.set_envmap(_ENVMAP_PATH, scale=5.0)

        # Initialize Camera.
        rk_camera = cameras.spherical_coord_to_cam(
            60.0,
            azimuth=math.pi / 2 - math.pi / 12,
            elevation=math.pi / 2 - math.pi / 6,
            cam_dist=2.5,
            max_len=_REND_SHAPE[0] / 2)
        camera = brender.CalibratedCamera(scene, rk_camera.cam_to_world(),
                                          rk_camera.fov)
        scene.set_active_camera(camera)

        with scene.select():
            mesh = brender.mesh.Monkey(position=(0, 0, 0))
            mesh.enable_smooth_shading()

        pbar = tqdm(materials)
        for material in pbar:
            pbar.set_description(material.name)

            uv_ref_scale = 2 ** (material.default_scale - 4)
            bmat = material_to_brender(material, uv_ref_scale=uv_ref_scale)
            brender.mesh.set_material(mesh.bobj, bmat)
            if bmat.has_uvs:
                mesh.compute_uv_density()

            with suppress_stdout():
                rend = scene.render_to_array(format='exr')

            material.save_data('previews/monkey.studio021.exr', rend)
            material.save_data('previews/monkey.studio021.png',
                               to_8bit(toolbox.images.linear_to_srgb(rend)))


def register_shape(app, shape, path):
    shape_dir = Path(config.BLOB_ROOT, 'shapes', str(shape.id))

    # Import the 3DS file into a temporary Blender scene and clean up the mesh.
    with suppress_stdout():
        scene = brender.Scene(app, shape=(1000, 1000))
        mesh = brender.Mesh.from_3ds(scene, path)
        mesh.remove_doubles()
        mesh.make_normals_consistent()
        mesh.enable_smooth_shading()
        mesh.unwrap_uv()

    bpy.ops.export_scene.obj(filepath=str(TMP_OBJ_PATH))

    try:
        if not shape.models_dir.exists():
            shape.models_dir.mkdir(parents=True)
        shutil.copy(str(TMP_OBJ_PATH), str(shape.obj_path))
        shutil.copy(str(TMP_MTL_PATH), str(shape.mtl_path))
    except Exception:
        # Clean up partially copied files before re-raising.
        shape.obj_path.unlink()
        shape.mtl_path.unlink()
        shape_dir.rmdir()
        raise


def do_render(scene, pair, mesh, envmaps_by_split, cam_angles, rk_mesh,
              seg_substances, mats_by_subst, bmats):
    time_begin = time.time()

    # Jitter camera params.
    cam_azimuth, cam_elevation = random.choice(cam_angles)
    cam_azimuth += random.uniform(-math.pi / 12, math.pi / 12)
    cam_elevation += random.uniform(-math.pi / 24, math.pi / 24)
    cam_dist = random.uniform(1.3, 1.75)
    cam_fov = random.uniform(FOV_MIN, FOV_MAX)
    rk_camera = cameras.spherical_coord_to_cam(
        cam_fov, cam_azimuth, cam_elevation, cam_dist=cam_dist,
        max_len=_REND_SHAPE[0] / 2)

    # Jitter envmap params. Set the envmap rotation so that the camera points
    # at the "back", but allow some wiggle room.
    envmap_scale = random.uniform(0.9, 1.2)
    envmap = random.choice(envmaps_by_split[pair.shape.split_set])
    envmap_rotation = (0, 0, (envmap.azimuth + math.pi / 2 + cam_azimuth
                              + random.uniform(-math.pi / 24, math.pi / 24)))
    scene.set_envmap(envmap.get_data_path('hdr.exr'),
                     scale=envmap_scale, rotation=envmap_rotation)

    if scene.camera is None:
        camera = brender.CalibratedCamera(scene, rk_camera.cam_to_world(),
                                          cam_fov)
        scene.set_active_camera(camera)
    else:
        scene.camera.set_params(rk_camera.cam_to_world(), cam_fov)

    segment_material_ids = {}
    segment_uv_ref_scales = {}
    segment_uv_rotations = {}
    segment_uv_translations = {}
    segment_mean_roughness = {}

    logger.info('Setting materials...')
    brdf_names = set()
    substances = set()
    for seg_name in rk_mesh.materials:
        for bobj in bpy.data.materials:
            if bobj.name != seg_name:
                continue
            if seg_name not in seg_substances:
                logger.warning('Substance unknown for %s', seg_name)
                return
            substance = seg_substances[seg_name]
            substances.add(substance)
            materials = mats_by_subst[substance]
            if len(materials) == 0:
                logger.warning('No materials for substance %s', substance)
                return

            material: models.Material = random.choice(materials)
            brdf_names.add(material.name)

            # Jitter UV map.
            uv_translation = (random.uniform(0, 1), random.uniform(0, 1))
            uv_rotation = random.uniform(0, 2 * math.pi)
            uv_ref_scale = (2 ** (material.default_scale - 3.0
                                  + random.uniform(-1.0, 0.5)))

            segment_uv_ref_scales[seg_name] = uv_ref_scale
            segment_material_ids[seg_name] = material.id
            segment_uv_rotations[seg_name] = uv_rotation
            segment_uv_translations[seg_name] = uv_translation

            bmat: NodesMaterial = loader.material_to_brender(
                material, bobj=bobj, uv_ref_scale=uv_ref_scale,
                uv_translation=uv_translation, uv_rotation=uv_rotation)
            segment_mean_roughness[seg_name] = float(bmat.mean_roughness())
            bmats.append(bmat)

    # This needs to come after the materials are initialized.
    logger.info('Computing UV density...')
    mesh.compute_uv_density()

    logger.info('Rendering...')
    with suppress_stdout():
        rend = scene.render_to_array(format='exr')
    caption = f'{envmap.name}, {str(substances)}, {str(brdf_names)}'
    rend_srgb = toolbox.images.linear_to_srgb(rend)

    time_elapsed = time.time() - time_begin
    logger.info('Rendered one in %fs', time_elapsed)

    seg_map = shortcuts.render_segments(rk_mesh, rk_camera)
    seg_vis = visualize_map(seg_map)[:, :, :3]
    normal_map = shortcuts.render_mesh_normals(rk_mesh, rk_camera)

    # Convert normals from [-1, 1] to [0, 255] for storage as an image.
    normal_map_blender = normal_map.copy()
    normal_map_blender[:, :, :3] += 1.0
    normal_map_blender[:, :, :3] /= 2.0
    normal_map_blender = np.round(255.0 * normal_map_blender).astype(np.uint8)

    figure = toolbox.images.to_8bit(np.hstack((
        seg_vis,
        rend_srgb[:, :, :3],
    )))

    segment_ids = {name: i for i, name in enumerate(rk_mesh.materials)}

    params = {
        'split_set': pair.shape.split_set,
        'pair_id': pair.id,
        'shape_id': pair.shape_id,
        'exemplar_id': pair.exemplar_id,
        'camera': {
            'fov': cam_fov,
            'azimuth': cam_azimuth,
            'elevation': cam_elevation,
            'distance': cam_dist,
        },
        'envmap': {
            'id': envmap.id,
            'name': envmap.name,
            'source': envmap.source,
            'scale': envmap_scale,
            'rotation': envmap_rotation,
        },
        'segment': {
            'segment_ids': segment_ids,
            'materials': segment_material_ids,
            'uv_ref_scales': segment_uv_ref_scales,
            'uv_translations': segment_uv_translations,
            'uv_rotations': segment_uv_rotations,
            'mean_roughness': segment_mean_roughness,
        },
        'time_elapsed': time_elapsed,
    }

    return {
        'ldr': toolbox.images.to_8bit(rend_srgb),
        'hdr': rend,
        'seg_map': (seg_map + 1).astype(np.uint8),
        'seg_vis': toolbox.images.to_8bit(seg_vis),
        'normal_image': normal_map_blender,
        'params': params,
    }


async def process_pair(app: brender.Brender,
                       pair: ExemplarShapePair,
                       cam_angles: List[Tuple[float, float]],
                       mats_by_subst: Dict[str, List[NodesMaterial]],
                       num_rends, *,
                       envmaps_by_split,
                       is_dry_run,
                       epoch,
                       collector_ctx,
                       required_substances=None):
    rk_mesh, _ = pair.shape.load()
    rk_mesh.resize(1)

    with open(_TMP_MESH_PATH, 'w') as f:
        wavefront.save_obj_file(f, rk_mesh)

    seg_substances = utils.compute_segment_substances(pair)
    if required_substances:
        for subst in required_substances:
            if subst not in seg_substances.values():
                return
    print(seg_substances.values())

    scene = brender.Scene(app, shape=_REND_SHAPE,
                          tile_size=(40, 40),
                          aa_samples=128,
                          diffuse_samples=3,
                          specular_samples=3,
                          background_mode=BackgroundMode.COLOR,
                          background_color=(1, 1, 1, 1))

    with suppress_stdout():
        mesh = Mesh.from_obj(scene, _TMP_MESH_PATH)
        # mesh.remove_doubles()
        mesh.enable_smooth_shading()

    for i in range(num_rends):
        bmats = []
        try:
            r = do_render(scene, pair, mesh, envmaps_by_split, cam_angles,
                          rk_mesh, seg_substances, mats_by_subst, bmats)
        finally:
            # Release the Blender materials created for this render.
            while len(bmats) > 0:
                bmat = bmats.pop()
                bmat.bobj.name = bmat.bobj.name
                del bmat

        # do_render returns None if a segment has no known substance or no
        # candidate materials; skip this iteration in that case.
        if r is None:
            continue

        if not is_dry_run:
            await collector.send_data(
                **collector_ctx,
                split_set=pair.shape.split_set,
                pair_id=pair.id,
                epoch=epoch,
                iteration=i,
                params=r['params'],
                ldr_image=r['ldr'],
                hdr_image=r['hdr'],
                seg_map=r['seg_map'],
                seg_vis=r['seg_vis'],
                normal_image=r['normal_image'],
            )


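# A minimal sketch of how process_pair might be driven, assuming the pairs,
# camera angles, environment maps, material lists, and collector context have
# already been queried elsewhere; it is illustrative only and not the original
# entry point. It could be run with, e.g.,
# asyncio.get_event_loop().run_until_complete(render_all_pairs(app, ...)).
async def render_all_pairs(app, pairs, cam_angles, mats_by_subst,
                           envmaps_by_split, collector_ctx,
                           num_rends=5, epoch=0, is_dry_run=False):
    for pair in tqdm(pairs):
        await process_pair(app, pair, cam_angles, mats_by_subst, num_rends,
                           envmaps_by_split=envmaps_by_split,
                           is_dry_run=is_dry_run,
                           epoch=epoch,
                           collector_ctx=collector_ctx)

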
def construct_inference_scene(app: brender.Brender,
                              pair: models.ExemplarShapePair,
                              pair_inference_dict,
                              mat_by_id,
                              envmap: models.Envmap,
                              scene_type='inferred',
                              num_samples=256,
                              rend_shape=(1280, 1280),
                              tile_size=(512, 512),
                              frontal_camera=False,
                              diagonal_camera=False,
                              add_floor=True):
    if scene_type not in {'inferred', 'mtl'}:
        raise ValueError('Invalid scene type.')

    inference_dict = pair_inference_dict['segments']

    rk_mesh, _ = pair.shape.load(size=1)
    rk_mesh.resize(1)

    scene = brender.Scene(app, shape=rend_shape,
                          num_samples=num_samples,
                          tile_size=tile_size,
                          background_mode=BackgroundMode.COLOR,
                          background_color=(1.0, 1.0, 1.0, 0))
    envmap_rotation = (0, 0, (envmap.azimuth + math.pi / 2 + pair.azimuth))
    scene.set_envmap(envmap.get_data_path('hdr.exr'),
                     scale=0.8, rotation=envmap_rotation)

    if frontal_camera:
        distance = 1.5
        fov = 50
        azimuth, elevation = pair.shape.get_frontal_angles()
    elif diagonal_camera:
        distance = 1.5
        fov = 50
        azimuth, elevation = pair.shape.get_demo_angles()
    else:
        distance = 4.0
        fov = pair.fov
        azimuth, elevation = pair.azimuth, pair.elevation

    # Get exemplar camera parameters.
    rk_camera = cameras.spherical_coord_to_cam(
        fov, azimuth, elevation, cam_dist=distance,
        max_len=rend_shape[0] / 2)
    camera = brender.CalibratedCamera(scene, rk_camera.cam_to_world(), fov)
    scene.set_active_camera(camera)

    with suppress_stdout():
        mesh = Mesh.from_obj(scene, pair.shape.resized_obj_path)
        mesh.make_normals_consistent()
        mesh.enable_smooth_shading()
        mesh.recenter()

    if add_floor:
        min_pos = mesh.compute_min_pos()
        floor_mat = DiffuseMaterial(diffuse_color=(1.0, 1.0, 1.0))
        floor_mesh = Plane(position=(0, 0, min_pos))
        floor_mesh.set_material(floor_mat)

    if scene_type == 'inferred':
        for seg_id, seg_name in enumerate(rk_mesh.materials):
            if str(seg_id) not in inference_dict:
                continue
            mat_id = int(inference_dict[str(seg_id)]['material'][0]['id'])
            material = mat_by_id[mat_id]
            uv_ref_scale = 2 ** (material.default_scale - 3)
            print(f'[Pair {pair.id}] Setting segment {seg_id} ({seg_name}) '
                  f'to material {material.name}')

            # Activate only the current material.
            for bobj in bpy.data.materials:
                if bobj.name == seg_name:
                    bmat = loader.material_to_brender(
                        material, bobj=bobj, uv_ref_scale=uv_ref_scale)
                    scene.add_bmat(bmat)

        # This needs to come after the materials are initialized.
        print('Computing UV density...')
        mesh.compute_uv_density()

    return scene


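# A minimal usage sketch for construct_inference_scene, assuming a Brender app
# has been initialized and that `pair`, `pair_inference_dict`, `mat_by_id`,
# and `envmap` have already been loaded (the loading step is not shown and is
# assumed to happen elsewhere). It renders the inferred-material scene and
# saves an sRGB preview, mirroring the render/save calls used in this file.
def render_inference_preview(app, pair, pair_inference_dict, mat_by_id,
                             envmap, out_path):
    scene = construct_inference_scene(app, pair, pair_inference_dict,
                                      mat_by_id, envmap,
                                      scene_type='inferred',
                                      num_samples=256)
    with suppress_stdout():
        rend = scene.render_to_array(format='exr')
    rend_srgb = to_8bit(toolbox.images.linear_to_srgb(rend))
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', UserWarning)
        skio.imsave(str(out_path), rend_srgb)

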
def render_pair(app: brender.Brender, pair: ExemplarShapePair,
                materials_by_substance, base_out_dir):
    # Load shapenet mesh and resize to 1.0 to match Blender size.
    rk_mesh, _ = pair.shape.load()
    rk_mesh.resize(1)
    with open(_TMP_MESH_PATH, 'w') as f:
        wavefront.save_obj_file(f, rk_mesh)

    scene = brender.Scene(app, shape=_REND_SHAPE, aa_samples=32)
    envmap_rotation = (0, 0, (math.pi + math.pi / 2 + pair.azimuth))
    scene.set_envmap(_ENVMAP_PATH, scale=5, rotation=envmap_rotation)

    with suppress_stdout():
        mesh = Mesh.from_obj(scene, _TMP_MESH_PATH)

    mat_substances = utils.compute_segment_substances(pair)

    # Get exemplar camera parameters.
    rk_camera = cameras.spherical_coord_to_cam(
        pair.fov, pair.azimuth, pair.elevation, cam_dist=2.0,
        max_len=_REND_SHAPE[0] / 2)
    segment_im = render_segments(rk_mesh, rk_camera)

    camera = brender.CalibratedCamera(scene, rk_camera.cam_to_world(),
                                      pair.fov)
    scene.set_active_camera(camera)

    bmats = []
    segment_pbar = tqdm(rk_mesh.materials)
    for segment_name in segment_pbar:
        segment_pbar.set_description(f'Segment {segment_name}')

        try:
            mat_subst = mat_substances[segment_name]
            materials = materials_by_substance[mat_subst]
        except KeyError:
            continue

        out_dir = Path(base_out_dir, str(pair.id), str(segment_name))
        out_dir.mkdir(parents=True, exist_ok=True)

        material_pbar = tqdm(materials)
        for material in material_pbar:
            material_pbar.set_description(f'Material {material.id}')
            out_path = Path(out_dir, f'{material.id}.png')
            if out_path.exists():
                material_pbar.set_description(
                    f'Material {material.id} already rendered')
                continue

            # Activate only the current material. If no Blender material
            # matches this segment, render it with an invisible material.
            bobj = None
            for bobj in bpy.data.materials:
                if bobj.name == segment_name:
                    break
            bobj_matches = [o for o in bpy.data.materials
                            if o.name == segment_name]
            if len(bobj_matches) == 0:
                bmat = InvisibleMaterial(bobj=bobj)
            else:
                bmat = loader.material_to_brender(material, bobj=bobj)
            bmats.append(bmat)

            with suppress_stdout():
                rend_im = scene.render_to_array()

            vis.image(rend_im.transpose((2, 0, 1)), win='rend-im')

            # Mask out everything except the current segment and crop tightly
            # around the foreground.
            rend_im[segment_im != rk_mesh.materials.index(segment_name)] = 0
            fg_bbox = mask_bbox(segment_im > -1)
            rend_im = crop_tight_fg(rend_im, _FINAL_SHAPE, bbox=fg_bbox,
                                    fill=0, use_pil=True)

            with warnings.catch_warnings():
                warnings.simplefilter('ignore', UserWarning)
                skio.imsave(str(out_path), rend_im)

    # Release the Blender materials created for this pair.
    while len(bmats) > 0:
        bmat = bmats.pop()
        bmat.bobj.name = bmat.bobj.name
        del bmat


def main():
    with session_scope() as sess:
        materials = (
            sess.query(models.Material)
            # .filter(sa.and_(models.Material.default_scale.isnot(None),
            #                 models.Material.substance == 'fabric'))
            # .filter(models.Material.type == MaterialType.MDL)
            .order_by(models.Material.id.asc())
            .all())
        shape = sess.query(models.Shape).get(4682)
        # shape = sess.query(models.Shape).get(2333)

        # Initialize Brender and Scene.
        app = brender.Brender()
        app.init()
        scene = brender.Scene(app, shape=_REND_SHAPE, aa_samples=196)
        envmap_rotation = (0, 0, (math.pi + math.pi / 2 - math.pi / 2
                                  - math.pi / 12))
        scene.set_envmap(_ENVMAP_PATH, scale=2.0, rotation=envmap_rotation)

        # Initialize Camera.
        rk_camera = cameras.spherical_coord_to_cam(
            60.0,
            azimuth=-math.pi / 2 - math.pi / 12,
            elevation=math.pi / 2 - math.pi / 6,
            cam_dist=1.2,
            max_len=_REND_SHAPE[0] / 2)
        camera = brender.CalibratedCamera(scene, rk_camera.cam_to_world(),
                                          rk_camera.fov)
        scene.set_active_camera(camera)

        # Load shapenet mesh and resize to 1.0 to match Blender size.
        rk_mesh, _ = shape.load()
        rk_mesh.resize(1)
        with open(_TMP_MESH_PATH, 'w') as f:
            wavefront.save_obj_file(f, rk_mesh)
        mesh = brender.mesh.Mesh.from_obj(scene, _TMP_MESH_PATH)

        # Align the mesh to a camera looking straight at the diffuser. The
        # diffuser for Studio 021 is at azimuth=pi/2, elevation=pi/2.
        # brender.mesh.align_mesh_to_direction(mesh, math.pi / 2, math.pi / 2)

        # with scene.select():
        #     mesh = brender.mesh.Monkey(position=(0, 0, 0))

        pbar = tqdm(materials)
        for material in pbar:
            uv_ref_scale = 2 ** (material.default_scale - 4)
            pbar.set_description(material.name)

            data_name = f'previews/chair_{material.default_scale}.png'
            if material.data_exists(data_name):
                continue

            # _, _, uv_density = measure_uv_density(mesh.bobj)
            bmat = material_to_brender(material, uv_ref_scale=uv_ref_scale)
            brender.mesh.set_material(mesh.bobj, bmat)
            if bmat.has_uvs:
                mesh.compute_uv_density()

            with suppress_stdout():
                rend = scene.render_to_array(format='exr')

            # material.save_data('previews/chair.exr', rend)
            bpy.ops.wm.save_as_mainfile(filepath='/local1/data/test.blend')
            material.save_data(data_name,
                               to_8bit(toolbox.images.linear_to_srgb(rend)))