def __init__(self, width=None, height=None, headless=False):
        """Set up the texture-mapping visualiser window and load the UV shader.

        Args:
            width: Window/buffer width in pixels, or None for the default.
            height: Window/buffer height in pixels, or None for the default.
            headless: When True, render off-screen with no visible window.
        """
        BaseRenderApp.__init__(self,
                               title='Texture mapping visualiser',
                               width=width,
                               height=height,
                               headless=headless)

        self.model = None

        # GLSL sources live next to the other 3D assets.
        shader_dir = path.join(util.get_resource_dir(), '3d_assets')
        vert_path = path.join(shader_dir, 'texture_uv_shader.vert')
        frag_path = path.join(shader_dir, 'texture_uv_shader.frag')

        self.shader_view_mode = self.ShaderViewMode_3D
        self.shader_texture_mode = self.ShaderTextureMode_Projection
        self.uvShader = Shader.load(Shader.SL_GLSL,
                                    vertex=vert_path,
                                    fragment=frag_path)

        # Off-screen buffers; created later, not during construction.
        self.proj_buffer = None
        self.tex_buffer = None
        self.tex_buffer_texture = None

        self.screenshot_count = 0

        # Texture dimensions are unknown until a texture is loaded.
        self.texture_width = None
        self.texture_height = None

        self.default_bg = [1.0, 1.0, 1.0, 1.0]

        # Optional camera intrinsics; filled in by init_scene when provided.
        self.camera_film_size = None
        self.camera_focal_length = None
def main():
    """Measure reconstruction error for the heart texture capture set."""
    capture_dir = path.join(util.get_resource_dir(), 'heart_texture_capture')

    measure_error(path.join(capture_dir, 'base_texture.png'),
                  path.join(capture_dir, 'base_no_luma.png'),
                  path.join(capture_dir, 'base_confidence.png'),
                  path.join(capture_dir, 'base_difference.png'))
    def capture_camera_screenshot(self):
        """Log the current camera pose and save a numbered screenshot PNG."""
        print('\nScreenshot:')
        print('  - Cam pos:', self.camera.getPos())
        print('  - Cam hpr:', self.camera.getHpr())

        frame = self.capture_screenshot()
        out_path = path.join(
            util.get_resource_dir(),
            '{}_screenshot.png'.format(self.screenshot_count))
        cv.imwrite(out_path, frame)
        # Bump the counter so the next capture gets a fresh filename.
        self.screenshot_count += 1
# Example #4 (score: 0)
def main():
    """Run and time the edge-alignment error measurement over 5 captures."""
    capture_path = path.join(util.get_resource_dir(), 'iousfan_capture')
    projection_tpl = path.join(capture_path, '{}_projection.png')
    confidence_tpl = path.join(capture_path, '{}_confidence.png')
    output_tpl = path.join(capture_path, '{}_edges.png')
    total_images = 5

    t0 = time.time()
    measure_alignment_error(projection_tpl, confidence_tpl, output_tpl,
                            total_images)
    elapsed = time.time() - t0
    print('Time taken: {}s'.format(np.round(elapsed, 2)))
def main():
    """Reproject camera screenshots onto the placenta model.

    With ``tex_mode`` enabled the renderer runs headless at the texture's
    native resolution and dumps per-view shader textures to disk; otherwise
    it opens an interactive window for exploring the model.
    """
    resource_dir = util.get_resource_dir()
    assets_dir = path.join(resource_dir, '3d_assets')
    images_dir = path.join(resource_dir, 'placenta_images')

    model_path = path.join(assets_dir, 'placenta.obj')
    texture_path = path.join(assets_dir, 'placenta.png')
    normal_map_path = None
    camera_image_path = path.join(images_dir, '{}_screenshot.png')
    capture_data_json_path = path.join(images_dir, 'capture_data.json')
    capture_folder_path = path.join(resource_dir, 'placenta_texture')
    base_capture_path = path.join(capture_folder_path, 'base_{}.png')
    texture_capture_path = path.join(capture_folder_path, '{}_{}.png')

    # Load the stored capture metadata (intrinsics + per-shot poses).
    capture_json = util.load_dict(capture_data_json_path)
    camera_film_size = capture_json.get('camera_film_size')
    camera_focal_length = capture_json.get('camera_focal_length')
    camera_pos = capture_json['camera_pos']
    camera_hpr = capture_json['camera_hpr']

    # When tex_mode is `true` the app captures reprojected textures in
    # headless mode. When it is false, the app runs in foreground and lets
    # you explore the model.
    tex_mode = True

    if tex_mode:
        # Match the render size to the texture so captures map 1:1.
        tex_h, tex_w = cv.imread(texture_path).shape[:2]
        renderer = TextureMappingRenderApp(width=tex_w,
                                           height=tex_h,
                                           headless=True)
    else:
        renderer = TextureMappingRenderApp(width=720,
                                           height=576,
                                           headless=False)

    renderer.init_scene(model_path=model_path,
                        texture_path=texture_path,
                        normal_map_path=normal_map_path,
                        camera_film_size=camera_film_size,
                        camera_focal_length=camera_focal_length)

    def capture_texture(texture_type, name, index=None):
        # Base captures are unnumbered; per-view captures carry their index.
        grab = renderer.capture_shader_texture(texture_type)
        if index is None:
            dest = base_capture_path.format(name)
        else:
            dest = texture_capture_path.format(index, name)
        cv.imwrite(dest, grab)

    util.ensure_dir(capture_folder_path)
    for i in range(len(camera_pos)):
        print('Processing screenshot {}...'.format(i))

        renderer.update_projection(
            camera_image_path=camera_image_path.format(i),
            camera_pos=camera_pos[i],
            camera_hpr=camera_hpr[i])

        time.sleep(0.1)

        if not tex_mode:
            continue

        if i == 0:
            # View-independent textures only need to be captured once.
            capture_texture(renderer.ShaderTextureMode_Default, 'texture')
            capture_texture(renderer.ShaderTextureMode_Normal, 'normal')
            capture_texture(renderer.ShaderTextureMode_Mask, 'mask')

        capture_texture(renderer.ShaderTextureMode_Projection,
                        'projection', i)
        capture_texture(renderer.ShaderTextureMode_Visibility,
                        'visibility', i)
        capture_texture(renderer.ShaderTextureMode_Frustum, 'frustum', i)
        capture_texture(renderer.ShaderTextureMode_Light, 'light', i)

    if not tex_mode:
        renderer.run()

    renderer.shutdown_and_destroy()
# Example #6 (score: 0)
def main():
    """Refine one camera pose by minimising edge misalignment.

    The projection captured for ``base_image_index`` is treated as ground
    truth. The pose of ``new_image_index`` is optimised with L-BFGS-B over
    the 6-DoF [x, y, z, h, p, r] vector so the reprojected edges line up
    with the base projection's edges. The refined pose is written back into
    capture_data.json.
    """
    resource_dir = util.get_resource_dir()
    assets_dir = path.join(resource_dir, '3d_assets')

    model_path = path.join(assets_dir, 'placenta.obj')
    texture_path = path.join(assets_dir, 'placenta.png')
    normal_map_path = None
    camera_image_path = path.join(resource_dir, 'placenta_images',
                                  '{}_screenshot.png')
    capture_data_json_path = path.join(resource_dir, 'placenta_images',
                                       'capture_data.json')
    capture_folder_path = path.join(resource_dir, 'placenta_texture')
    texture_capture_path = path.join(capture_folder_path, '{}_{}.png')

    # Load capture data JSON
    capture_json = util.load_dict(capture_data_json_path)
    camera_film_size = capture_json.get('camera_film_size')
    camera_focal_length = capture_json.get('camera_focal_length')
    camera_pos = capture_json['camera_pos']
    camera_hpr = capture_json['camera_hpr']

    # Prepare the renderer at the texture's native resolution (headless).
    texture_cv = cv.imread(texture_path)
    texture_height, texture_width = texture_cv.shape[:2]
    renderer = TextureMappingRenderApp(width=texture_width,
                                       height=texture_height,
                                       headless=True)

    renderer.init_scene(model_path=model_path,
                        texture_path=texture_path,
                        normal_map_path=normal_map_path,
                        camera_film_size=camera_film_size,
                        camera_focal_length=camera_focal_length)

    # Specify the index of images that will be used as the ground truth
    base_image_index = 3

    # Specify the index of the image for which the camera pose will be adjusted
    new_image_index = 1

    # Ground-truth edge map: greyscale -> Laplacian -> 3x3 box blur.
    # (Fix: a redundant colour imread of the same file, whose result was
    # immediately overwritten by the greyscale read, has been removed.)
    base_projection = cv.imread(
        texture_capture_path.format(base_image_index, 'projection'),
        cv.IMREAD_GRAYSCALE)
    base_projection = cv.Laplacian(base_projection, cv.CV_8UC1)
    base_projection = cv.blur(base_projection, (3, 3))
    # NOTE(review): the base mask comes from the '{i}_confidence.png' capture
    # while the candidate mask below uses the Visibility shader texture —
    # confirm this asymmetry is intentional.
    base_mask = (cv.imread(
        texture_capture_path.format(base_image_index, 'confidence'),
        cv.IMREAD_GRAYSCALE) > 5)

    new_image_path = camera_image_path.format(new_image_index)

    cam_pos = camera_pos[new_image_index]
    cam_hpr = camera_hpr[new_image_index]

    renderer.update_projection(camera_image_path=new_image_path,
                               camera_pos=cam_pos,
                               camera_hpr=cam_hpr)

    first_n = None
    min_loss = 999999

    def loss(params, log=True):
        """Mean edge-alignment error for a candidate pose.

        ``params`` is [x, y, z, h, p, r]. Poses whose overlap count drops
        below 90% of the first evaluation's are given a large penalty so the
        optimiser cannot reduce the error by shrinking the compared region.
        """
        nonlocal first_n
        nonlocal min_loss

        renderer.update_camera_pose(params[:3], params[3:])
        new_projection = renderer.capture_shader_texture(
            renderer.ShaderTextureMode_Projection)
        new_projection = cv.cvtColor(new_projection, cv.COLOR_RGB2GRAY)
        new_projection = cv.Laplacian(new_projection, cv.CV_8UC1)
        new_projection = cv.blur(new_projection, (3, 3))
        new_mask = (renderer.capture_shader_texture(
            renderer.ShaderTextureMode_Visibility)[:, :, 0] > 10)

        e, n = compute_alignment_error_between(base_projection.astype(float),
                                               base_mask,
                                               new_projection.astype(float),
                                               new_mask)

        loss_val = e / n
        if first_n is not None:
            ratio = first_n / n
            if ratio < 0.9:
                loss_val = 9999  # overlap penalty
        else:
            first_n = n

        if log:
            print('Loss:', np.round(loss_val, 3))
            if loss_val < min_loss:
                min_loss = loss_val
                print('Min args:', params)
        return loss_val

    init_params = cam_pos + cam_hpr
    # Seed first_n with the initial pose's overlap count (no logging).
    loss(init_params, False)

    start = time.time()
    new_params = opt.fmin_l_bfgs_b(loss,
                                   init_params,
                                   approx_grad=True,
                                   epsilon=0.3,
                                   pgtol=1,
                                   maxiter=50,
                                   maxfun=100)[0]
    end = time.time()
    duration = end - start

    print('Initial loss:', np.round(loss(init_params, False), 6))
    print('Final loss:', np.round(loss(new_params, False), 6))
    print('Time taken: {}s'.format(np.round(duration, 2)))
    print('')
    new_pos = new_params[:3]
    new_hpr = new_params[3:]
    # camera_pos/camera_hpr alias the lists inside capture_json, so mutating
    # them here updates the payload saved below.
    camera_pos[new_image_index] = new_pos.tolist()
    camera_hpr[new_image_index] = new_hpr.tolist()
    print('New pos:', new_pos)
    print('New hpr:', new_hpr)

    # Update capture data with the new estimates
    util.save_dict(capture_data_json_path, capture_json)

    renderer.shutdown_and_destroy()
def main():
    """Merge captured per-view textures using the stored capture data."""
    res_dir = util.get_resource_dir()
    merge_textures(
        path.join(res_dir, 'placenta_images', 'capture_data.json'),
        path.join(res_dir, 'placenta_texture'))