def render_model_exemplars(pbar,
                           pair: ExemplarShapePair,
                           render_shape=config.SHAPE_REND_SHAPE):
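    """Renders and caches data for an exemplar-shape pair: the segment map,
    the foreground mask, and a tone-mapped Phong rendering, each cropped
    tightly to the foreground."""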
    warnings.simplefilter('ignore')

    camera = cameras.spherical_coord_to_cam(pair.fov, pair.azimuth,
                                            pair.elevation)

    pbar.set_description(f'[{pair.id}] Loading shape')
    mesh, materials = pair.shape.load()
    camera.near, camera.far = compute_tight_clipping_planes(
        mesh, camera.view_mat())

    pbar.set_description(f'[{pair.id}] Rendering segments')
    if not pair.data_exists(config.PAIR_FG_BBOX_NAME):
        segment_im = render_segments(mesh, camera)
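        # Background pixels are labeled -1 in the segment map.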
        fg_mask = segment_im > -1
        fg_bbox = mask_bbox(fg_mask)
        segment_im = crop_tight_fg(segment_im,
                                   render_shape,
                                   bbox=fg_bbox,
                                   fill=-1,
                                   order=0)
        pair.save_data(config.SHAPE_REND_SEGMENT_VIS_NAME,
                       skimage.img_as_uint(visualize_map(segment_im)))
        pair.save_data(config.SHAPE_REND_SEGMENT_MAP_NAME,
                       (segment_im + 1).astype(np.uint8))

        tqdm.write(f" * Saving {pair.get_data_path(config.PAIR_FG_BBOX_NAME)}")
        pair.save_data(config.PAIR_RAW_SEGMENT_MAP_NAME,
                       (segment_im + 1).astype(np.uint8))
        pair.save_data(config.PAIR_FG_BBOX_NAME,
                       fg_mask.astype(np.uint8) * 255)
    else:
        fg_mask = pair.load_data(config.PAIR_FG_BBOX_NAME)
        fg_bbox = mask_bbox(fg_mask)

    if not pair.data_exists(config.SHAPE_REND_PHONG_NAME):
        pbar.set_description(f'[{pair.id}] Rendering phong')
        phong_im = np.clip(
            render_wavefront_mtl(mesh,
                                 camera,
                                 materials,
                                 config.SHAPE_REND_RADMAP_PATH,
                                 gamma=2.2,
                                 ssaa=3,
                                 tonemap='reinhard'), 0, 1)
        phong_im = crop_tight_fg(phong_im, render_shape, bbox=fg_bbox)
        pbar.set_description(f'[{pair.id}] Saving data')
        pair.save_data(config.SHAPE_REND_PHONG_NAME,
                       skimage.img_as_uint(phong_im))
def render_model_exemplars(pbar,
                           pair: ExemplarShapePair,
                           render_shape=config.SHAPE_REND_SHAPE):
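    """Renders a cropped preview of the pair's shape, displays it in the
    'shape-preview' visdom window, and saves it as SHAPE_REND_PREVIEW_NAME."""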
    camera = cameras.spherical_coord_to_cam(pair.fov, pair.azimuth,
                                            pair.elevation)

    pbar.set_description(f'[{pair.id}] Loading shape')
    mesh, materials = pair.shape.load()
    camera.near, camera.far = compute_tight_clipping_planes(
        mesh, camera.view_mat())

    segment_im = render_segments(mesh, camera)
    fg_bbox = mask_bbox(segment_im > -1)

    pbar.set_description('Rendering preview')
    phong_im = np.clip(
        render_preview(mesh,
                       camera,
                       config.SHAPE_REND_RADMAP_PATH,
                       gamma=2.2,
                       ssaa=2), 0, 1)
    phong_im = crop_tight_fg(phong_im, render_shape, bbox=fg_bbox)

    vis.image(phong_im.transpose((2, 0, 1)), win='shape-preview')

    pbar.set_description(f'[{pair.id}] Saving data')
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        pair.save_data(config.SHAPE_REND_PREVIEW_NAME,
                       skimage.img_as_uint(phong_im))
def main():
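    """Computes the camera parameters and foreground crop bounding box for
    every pair within the alignment distance threshold and stores them in
    the pair's params."""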
    with session_scope() as sess:
        pairs = (sess.query(ExemplarShapePair)
                 .filter(ExemplarShapePair.distance < config.ALIGN_DIST_THRES)
                 .all())

        print(f'Fetched {len(pairs)} pairs. '
              f'align_dist_thres = {config.ALIGN_DIST_THRES}')

        pbar = tqdm(pairs)
        for pair in pbar:
            pbar.set_description(f'[Pair {pair.id}]')
            mesh, materials = pair.shape.load()
            camera = cameras.spherical_coord_to_cam(
                pair.fov, pair.azimuth, pair.elevation)
            camera.near, camera.far = \
                compute_tight_clipping_planes(mesh, camera.view_mat())
            segment_im = render_segments(mesh, camera)
            fg_bbox = mask_bbox(segment_im > -1)
            pair.params = {
                'camera': camera.tojsd(),
                'crop_bbox': fg_bbox,
            }
            sess.commit()
Example #4
def compute_uncropped_exemplar(exemplar_im, fg_mask):
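    """Pastes the exemplar image back onto a white canvas the size of the
    foreground mask, inside the squared foreground bounding box."""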
    fg_bbox = mask_bbox(fg_mask)
    fg_bbox = bbox_make_square(fg_bbox)
    in_shape = bbox_shape(fg_bbox)
    print(in_shape)
    in_image = imresize(exemplar_im, in_shape)
    out_image = np.full((*fg_mask.shape, 3), dtype=np.uint8, fill_value=255)
    out_image[fg_bbox[0]:fg_bbox[1], fg_bbox[2]:fg_bbox[3]] = in_image
    return out_image
Example #5
def crop_largest_component(image, mask, shape):
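    """Keeps only the largest connected component of the mask and returns
    the image cropped tightly around it on a white background."""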
    image = image.copy()[:, :, :3]
    image[~mask] = 1.0
    labels = morphology.label(mask)
    mask = labels == np.argmax(np.bincount(labels[labels > 0].flat))
    bbox = mask_bbox(mask)
    # Both material-candidate modes use the same tight crop on a white
    # background.
    image = crop_tight_fg(image,
                          shape=shape,
                          bbox=bbox,
                          fill=1.0,
                          use_pil=True)
    return image[:, :, :3]
def worker(input_tup):
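    """Crops a rendering to the pair's square foreground bounding box and
    writes a 750x750 JPEG along with a 100x100 thumbnail."""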
    pair, input_path = input_tup
    output_path = args.output_dir / f"{input_path.stem}.jpg"
    output_sm_path = args.output_dir / f"{input_path.stem}.small.jpg"
    if output_path.exists() and not args.overwrite:
        return

    try:
        rendering = imread(input_path)
    except OSError:
        tqdm.write(f"Cannot read {input_path!s}")
        input_path.unlink()
        return
    fg_mask = pair.load_data(config.PAIR_FG_BBOX_NAME)
    rendering = imresize(rendering, fg_mask.shape)
    bbox = bbox_make_square(mask_bbox(fg_mask))
    cropped = crop_bbox(rendering, bbox)
    cropped = imresize(cropped, (750, 750))
    imsave(output_path, cropped)
    cropped = imresize(cropped, (100, 100))
    imsave(output_sm_path, cropped)
Example #7
def main():
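    """Renders a Blinn-Phong preview of each shape with a fixed material
    under a single radiance map and saves the cropped result."""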
    logger.info("Loading models.")
    with session_scope() as sess:
        shapes = (
            sess.query(models.Shape).order_by(models.Shape.id.asc())
            # .filter_by(source='hermanmiller')
            .all())

    radmap_path = '/projects/grail/kpar/data/envmaps/rnl_cross.pfm'
    radmap = jsd.import_radiance_map(dict(path=radmap_path))
    scene = Scene()
    scene.set_radiance_map(radmap)

    dist = 200
    camera = PerspectiveCamera(size=(1000, 1000),
                               fov=0,
                               near=0.1,
                               far=5000.0,
                               position=(0, 0, -dist),
                               clear_color=(1, 1, 1, 0),
                               lookat=(0, 0, 0),
                               up=(0, 1, 0))

    renderer = SceneRenderer(scene,
                             camera=camera,
                             gamma=2.2,
                             ssaa=3,
                             tonemap='reinhard',
                             reinhard_thres=3.0,
                             show=args.preview)
    renderer.__enter__()

    for i, shape in enumerate(shapes):
        if shape.data_exists(config.SHAPE_REND_PHONG_NAME):
            continue
        if i < args.start:
            continue
        if args.end != -1 and i >= args.end:
            logger.info("Hit end {}={}.".format(i, args.end))
            return

        logger.info("[%d/%d] Processing %d", i + 1, len(shapes), shape.id)

        print(shape.obj_path)
        if not shape.obj_path.exists():
            logger.error('Shape %d does not have a UV mapped model.', shape.id)
            continue

        mesh = wavefront.read_obj_file(shape.obj_path)
        mesh.resize(100)
        materials = wavefront.read_mtl_file(shape.mtl_path, mesh)
        scene.add_mesh(mesh)

        for mat_name, mat in materials.items():
            roughness = math.sqrt(2 / (mat.specular_exponent + 2))
            # material = BlinnPhongMaterial(mat.diffuse_color,
            #                               mat.specular_color,
            #                               roughness)
            material = BlinnPhongMaterial(diff_color=(0.1, 0.28, 0.8),
                                          spec_color=(0.04, 0.04, 0.04),
                                          roughness=0.1)
            scene.put_material(mat_name, material)

        data_name = 'preview/phong.500x500.png'
        # if shape.data_exists(data_name):
        #     continue

        camera.position = spherical_to_cartesian(dist,
                                                 *shape.get_demo_angles())
        camera.fov = 50
        if args.preview:
            renderer.draw()
            renderer.swap_buffers()
        # Set format to RGBA so we can crop foreground using alpha.
        image = renderer.render_to_image(format='rgba')
        image = np.clip(image, 0, 1)
        fg_bbox = mask_bbox(image[:, :, 3] > 0)
        image = crop_tight_fg(image[:, :, :3], (500, 500),
                              bbox=fg_bbox,
                              use_pil=True)
        vis.image(image.transpose((2, 0, 1)), win='rend-preview')

        shape.save_data(config.SHAPE_REND_PHONG_NAME, image)
        scene.clear()
Example #8
def main():
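    """Renders each shape from a grid of viewpoints (fixed FOV, several
    elevations, sin-weighted azimuth counts) and saves the cropped
    renderings for alignment."""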
    logger.info("Loading models.")
    with session_scope() as sess:
        shapes = sess.query(models.Shape).order_by(models.Shape.id.desc()).all()

    radmap_path = '/projects/grail/kpar/data/envmaps2/rnl.cross.exr'
    radmap = jsd.import_radiance_map(dict(path=radmap_path))
    scene = Scene()
    scene.set_radiance_map(radmap)

    dist = 200
    camera = PerspectiveCamera(
        size=(200, 200), fov=0, near=0.1, far=5000.0,
        position=(0, 0, -dist), clear_color=(1, 1, 1, 0),
        lookat=(0, 0, 0), up=(0, 1, 0))

    renderer = SceneRenderer(scene, camera=camera,
                             gamma=2.2, ssaa=3,
                             tonemap='reinhard',
                             reinhard_thres=3.0,
                             show=args.preview)
    renderer.__enter__()

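    # Sample more azimuths near the equator (sin weighting) so viewpoints
    # are spaced roughly uniformly over the sphere.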
    fovs = [50]
    elevations = np.linspace(math.pi/4, math.pi/2 + math.pi/16, 10)
    max_azimuth_samps = 36
    azimuth_by_elev = {}
    n_viewpoints = 0
    for phi in elevations:
        n_azimuths = int(round(max_azimuth_samps * math.sin(phi)))
        azimuth_by_elev[phi] = np.linspace(0, 2*math.pi, n_azimuths)
        n_viewpoints += n_azimuths

    for i, shape in enumerate(shapes):
        last_name = "alignment/renderings/fov=50,theta=6.28318531,phi=1.76714587.png"
        if shape.data_exists(last_name):
            tqdm.write(f'Skipping {shape.id}')
            continue

        if i < args.start:
            continue
        if args.end != -1 and i >= args.end:
            logger.info("Hit end {}={}.".format(i, args.end))
            return

        logger.info("[%d/%d] Processing %d", i + 1, len(shapes), shape.id)

        if not shape.obj_path.exists():
            logger.error('Shape %d does not have a UV mapped model.', shape.id)
            continue

        mesh = wavefront.read_obj_file(shape.obj_path)
        mesh.resize(100)
        materials = wavefront.read_mtl_file(shape.mtl_path, mesh)
        scene.add_mesh(mesh)

        for mat_name, mat in materials.items():
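            # Approximate Blinn-Phong roughness from the specular exponent.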
            roughness = math.sqrt(2/(mat.specular_exponent + 2))
            scene.put_material(mat_name,
                               BlinnPhongMaterial(mat.diffuse_color,
                                                  mat.specular_color,
                                                  roughness))

        iterables = []
        for fov, phi in itertools.product(fovs, elevations):
            for theta in azimuth_by_elev[phi]:
                iterables.append((fov, theta, phi))

        pbar = tqdm(iterables)
        for fov, theta, phi in pbar:
            rend_fname = f"fov={fov},theta={theta:.08f},phi={phi:.08f}.png"
            data_name = f'alignment/renderings/{rend_fname}'
            if shape.data_exists(data_name):
                continue
            pbar.set_description(rend_fname)

            camera.position = spherical_to_cartesian(dist, theta, phi)
            camera.fov = fov
            if args.preview:
                renderer.draw()
                renderer.swap_buffers()
            # Set format to RGBA so we can crop foreground using alpha.
            image = renderer.render_to_image(format='rgba')
            image = np.clip(image, 0, 1)
            fg_bbox = mask_bbox(image[:, :, 3] > 0)
            image = crop_tight_fg(
                image[:, :, :3], (100, 100), bbox=fg_bbox, use_pil=True)
            vis.image(image.transpose((2, 0, 1)), win='rend-preview')

            shape.save_data(data_name, image)
        scene.clear()
def render_pair(app: brender.Brender, pair: ExemplarShapePair,
                materials_by_substance, base_out_dir):
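    """Renders every candidate material onto each segment of the pair's
    shape using the exemplar's camera, saving one cropped image per
    (segment, material) combination under base_out_dir."""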
    # Load shapenet mesh and resize to 1.0 to match Blender size.
    rk_mesh, _ = pair.shape.load()
    rk_mesh.resize(1)
    with open(_TMP_MESH_PATH, 'w') as f:
        wavefront.save_obj_file(f, rk_mesh)

    scene = brender.Scene(app, shape=_REND_SHAPE, aa_samples=32)
    envmap_rotation = (0, 0, (math.pi + math.pi/2 + pair.azimuth))
    scene.set_envmap(_ENVMAP_PATH, scale=5, rotation=envmap_rotation)

    with suppress_stdout():
        mesh = Mesh.from_obj(scene, _TMP_MESH_PATH)

    mat_substances = utils.compute_segment_substances(pair)

    # Get exemplar camera parameters.
    rk_camera = cameras.spherical_coord_to_cam(
        pair.fov, pair.azimuth, pair.elevation, cam_dist=2.0,
        max_len=_REND_SHAPE[0]/2)

    segment_im = render_segments(rk_mesh, rk_camera)

    camera = brender.CalibratedCamera(
        scene, rk_camera.cam_to_world(), pair.fov)
    scene.set_active_camera(camera)

    bmats = []
    segment_pbar = tqdm(rk_mesh.materials)
    for segment_name in segment_pbar:
        segment_pbar.set_description(f'Segment {segment_name}')

        try:
            mat_subst = mat_substances[segment_name]
            materials = materials_by_substance[mat_subst]
        except KeyError:
            continue

        out_dir = Path(base_out_dir, str(pair.id), str(segment_name))
        out_dir.mkdir(parents=True, exist_ok=True)

        material_pbar = tqdm(materials)
        for material in material_pbar:
            material_pbar.set_description(f'Material {material.id}')
            out_path = Path(out_dir, f'{material.id}.png')
            if out_path.exists():
                material_pbar.set_description(
                    f'Material {material.id} already rendered')
                continue

            # Find the Blender material object for this segment so that only
            # the current segment receives the candidate material.
            bobj = None
            for bobj in bpy.data.materials:
                if bobj.name == segment_name:
                    break

            bobj_matches = [o for o in bpy.data.materials
                            if o.name == segment_name]
            if len(bobj_matches) == 0:
                bmat = InvisibleMaterial(bobj=bobj)
            else:
                bmat = loader.material_to_brender(material, bobj=bobj)

            bmats.append(bmat)

            with suppress_stdout():
                rend_im = scene.render_to_array()

            vis.image(rend_im.transpose((2, 0, 1)), win='rend-im')

            rend_im[segment_im != rk_mesh.materials.index(segment_name)] = 0
            fg_bbox = mask_bbox(segment_im > -1)
            rend_im = crop_tight_fg(rend_im, _FINAL_SHAPE, bbox=fg_bbox,
                                    fill=0, use_pil=True)

            with warnings.catch_warnings():
                warnings.simplefilter('ignore', UserWarning)
                skio.imsave(str(out_path), rend_im)

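    # Clean up the Blender material wrappers created for this pair.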
    while len(bmats) > 0:
        bmat = bmats.pop()
        bmat.bobj.name = bmat.bobj.name
        del bmat