Example #1
def per_vertex_occlusion(mesh_in_img, err_proportion=0.0001, render_diag=600):

    # Measure the mesh extent: rescale brings the XY bounding-box diagonal to
    # render_diag pixels, and rescale_z brings the Z extent to the average XY
    # extent so sampling errors are comparable across axes.
    [x_r, y_r, z_r] = mesh_in_img.range()
    av_xy_r = (x_r + y_r) / 2.0

    rescale = render_diag / np.sqrt((mesh_in_img.range()[:2] ** 2).sum())
    rescale_z = av_xy_r / z_r

    # Rescale, shift to positive coordinates and pad by 2 pixels so the whole
    # mesh falls inside the rasterization canvas.
    mesh = Scale([rescale, rescale, rescale * rescale_z]).apply(mesh_in_img)
    mesh.points[...] = mesh.points - mesh.points.min(axis=0)
    mesh.points[:, :2] = mesh.points[:, :2] + 2
    shape = np.around(mesh.points.max(axis=0)[:2] + 2)
    print("rasterization image shape:", shape)
    
    # Rasterize barycentric coordinate / triangle index images, then a shape
    # image whose pixels hold the XYZ position of the surface visible there.
    bc, ti = rasterize_barycentric_coordinate_images(mesh, shape)
    si = rasterize_shape_image_from_barycentric_coordinate_images(
        as_colouredtrimesh(mesh), bc, ti)

    # err_proportion=0.01 is 1% deviation of total range of 3D shape
    threshold = render_diag * err_proportion
    # Sample the shape image at each vertex's XY position and compare with the
    # vertex's true position - vertices whose error falls below the threshold
    # were actually rendered (visible); occluded vertices land on nearer geometry.
    xyz_found = si.as_unmasked().sample(mesh.with_dims([0, 1]), order=1).T
    err = np.sum((xyz_found - mesh.points) ** 2, axis=1)

    visible = err < threshold
    return visible
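
A minimal usage sketch (not part of the original listing): `camera` and `raw_mesh` are hypothetical names for inputs produced by the surrounding pipeline, e.g. a perspective camera projecting an aligned mesh into image space as in the examples below.

# Hypothetical usage - `camera` and `raw_mesh` are assumed inputs.
mesh_in_img = camera.apply(raw_mesh)
visible = per_vertex_occlusion(mesh_in_img)
print('{}/{} vertices visible'.format(visible.sum(), mesh_in_img.n_points))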
Example #2
def get_landmark_points(mesh, img_shape=(320, 240), verbose=False):
    fitter = load_balanced_frontal_face_fitter()
    detector = load_dlib_frontal_face_detector()
    camera = perspective_camera_for_template(img_shape)

    # 1. Pre-process - align the mesh roughly with the template
    aligned_mesh = align_mesh_to_template(mesh, load_template()).apply(mesh)

    mesh_in_img = camera.apply(aligned_mesh)

    bcs = rasterize_barycentric_coordinate_images(mesh_in_img, img_shape)
    img = rasterize_mesh_from_barycentric_coordinate_images(mesh_in_img, *bcs)
    shape_img = rasterize_shape_image_from_barycentric_coordinate_images(
        mesh, *bcs)
    # 2. Find the one bounding box in the rendered image
    bboxes = detector(img)
    if len(bboxes) != 1:
        raise ValueError("Expected to find one face - found {}".format(
            len(bboxes)))
    else:
        if verbose:
            print('Detected 1 face')
    # 3. Fit from the bounding box
    fr = fitter.fit_from_bb(img, bboxes[0])
    if verbose:
        print('AAM fitting successfully completed')
    # 4. Sample the XYZ image to build back the landmarks
    img_lms = fr.final_shape.from_mask(LANDMARK_MASK)

    # Test whether each landmark fell on the rendered 3D surface (True) or on
    # the background (False)
    occlusion_mask = img.mask.sample(img_lms).ravel()

    img.landmarks['__lsfm_on_surface'] = img_lms.from_mask(occlusion_mask)
    img.landmarks['__lsfm_off_surface'] = img_lms.from_mask(~occlusion_mask)
    return PointCloud(shape_img.sample(img.landmarks['__lsfm_on_surface']).T)
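
A short, hedged usage sketch: load a mesh with menpo3d's importer (the path is hypothetical) and recover the 3D landmark positions returned by `get_landmark_points`.

import menpo3d.io as m3io

mesh = m3io.import_mesh('./face_scan.obj')  # hypothetical path
lms_3d = get_landmark_points(mesh, verbose=True)
print(lms_3d.n_points, 'landmarks recovered on the 3D surface')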
Example #3
def landmark_template(mesh, img_shape=(320, 240), verbose=False):
    fitter = load_balanced_frontal_face_fitter()
    detector = load_dlib_frontal_face_detector()
    camera = perspective_camera_for_template(img_shape)

    # 1. Pre-process - bring the template mesh into its reference space
    aligned_mesh = prepare_template_reference_space(mesh)

    mesh_in_img = camera.apply(aligned_mesh)

    bcs = rasterize_barycentric_coordinate_images(mesh_in_img, img_shape)
    img = rasterize_mesh_from_barycentric_coordinate_images(mesh_in_img, *bcs)
    shape_img = rasterize_shape_image_from_barycentric_coordinate_images(
        mesh, *bcs)
    # 2. Find the one bounding box in the rendered image
    bboxes = detector(img)
    if len(bboxes) != 1:
        raise ValueError("Expected to find one face - found {}".format(
            len(bboxes)))
    else:
        if verbose:
            print('Detected 1 face')
    # 3. Fit from the bounding box
    fr = fitter.fit_from_bb(img, bboxes[0])
    if verbose:
        print('AAM fitting successfully completed')
    # 4. Sample the XYZ image to build back the landmarks
    img_lms = fr.final_shape

    # Attach the recovered 3D landmarks to the mesh (Image.sample is called on
    # the base class to bypass the masked-image sampling check, so landmarks
    # lying outside the rendered mask do not raise)
    mesh.landmarks["ibug68"] = PointCloud(Image.sample(shape_img, img_lms).T)
    mask = np.zeros(68, dtype=bool)
    mask[30] = True
    mesh.landmarks["nosetip"] = mesh.landmarks["ibug68"].lms.from_mask(mask)
Example #4
def landmark_mesh(mesh, img_shape=(320, 240), verbose=False, template_fn=None):
    fitter = load_balanced_frontal_face_fitter()
    detector = load_dlib_frontal_face_detector()
    camera = perspective_camera_for_template(img_shape)

    # 1. Pre-process - align the mesh roughly with the template
    aligned_mesh = align_mesh_to_template(
        mesh, load_template(template_fn)).apply(mesh)

    mesh_in_img = camera.apply(aligned_mesh)

    bcs = rasterize_barycentric_coordinate_images(mesh_in_img, img_shape)
    img = rasterize_mesh_from_barycentric_coordinate_images(mesh_in_img, *bcs)
    shape_img = rasterize_shape_image_from_barycentric_coordinate_images(
        mesh, *bcs)
    # 2. Find the one bounding box in the rendered image
    bboxes = detector(img)
    if len(bboxes) != 1:
        raise ValueError(
            "Expected to find one face - found {}".format(len(bboxes)))
    else:
        if verbose:
            print('Detected 1 face')
    # 3. Fit from the bounding box
    fr = fitter.fit_from_bb(img, bboxes[0])
    if verbose:
        print('AAM fitting successfully completed')
    # 4. Sample the XYZ image to build back the landmarks
    img_lms = fr.final_shape.from_mask(LANDMARK_MASK)

    # Test whether each landmark fell on the rendered 3D surface (True) or on
    # the background (False)
    occlusion_mask = img.mask.sample(img_lms).ravel()

    img.landmarks['__lsfm_on_surface'] = img_lms.from_mask(occlusion_mask)
    img.landmarks['__lsfm_off_surface'] = img_lms.from_mask(~occlusion_mask)
    return_dict = {
        'landmarks_2d': img_lms,
        'occlusion_mask': occlusion_mask,
        'landmarks_3d_masked': PointCloud(shape_img.sample(
            img.landmarks['__lsfm_on_surface']).T)
    }

    if (~occlusion_mask).sum() != 0:
        groups = ['dlib_0', '__lsfm_on_surface', '__lsfm_off_surface']
        marker_edge_colours = ['blue', 'yellow', 'red']
    else:
        groups = ['dlib_0', '__lsfm_on_surface']
        marker_edge_colours = ['blue', 'yellow']

    lm_img = img.rasterize_landmarks(group=groups,
                                     line_colour='blue',
                                     marker_edge_colour=marker_edge_colours)
    return_dict['landmarked_image'] = lm_img

    return return_dict
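
Hedged usage sketch: `landmark_mesh` returns a dictionary whose keys are those built in the example above (the mesh path is hypothetical).

import menpo3d.io as m3io

mesh = m3io.import_mesh('./face_scan.obj')  # hypothetical path
result = landmark_mesh(mesh, verbose=True)
print('on-surface landmarks:', result['occlusion_mask'].sum())
print('3D landmarks:', result['landmarks_3d_masked'].n_points)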
Example #5
def shape_mask(mesh):
    # Rasterize the mesh at 256 x 256 to obtain per-pixel triangle indices.
    bcoords_img_inst, tri_index_img_inst = rasterize_barycentric_coordinate_images(
        mesh, [256, 256])
    TI_inst = tri_index_img_inst.as_vector()
    BC_inst = bcoords_img_inst.as_vector(keep_channels=True).T
    # Look up the triangle rendered at each vertex's XY position; the vertices
    # of those triangles are the ones visible in the rasterization.
    sample_index = tri_index_img_inst.as_unmasked().sample(
        mesh.with_dims([0, 1])).squeeze()
    shape_mask_idx = np.unique(mesh.trilist[sample_index].flatten())

    # Boolean per-vertex mask: True for vertices belonging to a visible triangle.
    shape_mask_arr = np.zeros([mesh.n_points])
    shape_mask_arr[shape_mask_idx] += 1

    return shape_mask_arr > 0
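
A minimal sketch of how the returned mask might be used, assuming `mesh` is already projected into image space: menpo's `TriMesh.from_mask` keeps only the vertices flagged as visible.

visible = shape_mask(mesh)
visible_mesh = mesh.from_mask(visible)  # TriMesh restricted to visible vertices
print('{} of {} vertices kept'.format(visible.sum(), mesh.n_points))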
Example #6
def fit(imagepath):

    image = mio.import_image(imagepath, normalize=False)

    # Ensure the image has three channels - greyscale inputs are replicated
    # across the channel axis.
    if len(image.pixels.shape) == 2:
        image.pixels = np.stack([image.pixels, image.pixels, image.pixels])

    if image.pixels.shape[0] == 1:
        image.pixels = np.concatenate(
            [image.pixels, image.pixels, image.pixels], axis=0)

    print(image.pixels_with_channels_at_back().shape)

    # Detect a face bounding box and run the 2D AAM fitter to obtain an
    # initial shape estimate.
    bb = detect(image.pixels_with_channels_at_back())[0]
    initial_shape = aam_fitter.fit_from_bb(image, bb).final_shape

    # Run the 3D fitter from the initial 2D shape.
    result = fitter.fit_from_shape(image,
                                   initial_shape,
                                   max_iters=40,
                                   camera_update=True,
                                   focal_length_update=False,
                                   reconstruction_weight=1,
                                   shape_prior_weight=.4e8,
                                   texture_prior_weight=1.,
                                   landmarks_prior_weight=1e5,
                                   return_costs=True,
                                   init_shape_params_from_lms=False)

    mesh = ColouredTriMesh(result.final_mesh.points, result.final_mesh.trilist)

    # Map the fitted mesh into image coordinates using the final camera and
    # affine transforms of the fit, with Lambertian shading applied for rendering.
    def transform(mesh):
        return result._affine_transforms[-1].apply(
            result.camera_transforms[-1].apply(mesh))

    mesh_in_img = transform(lambertian_shading(mesh))
    expr_dir = image.path.parent
    p = image.path.stem
    raster = rasterize_mesh(mesh_in_img, image.shape)

    # Build a UV template by cylindrically unwrapping the mean shape, then
    # flipping and scaling it to fit the target UV image resolution.
    uv_shape = (600, 1000)
    template = shape_model.mean()
    unwrapped_template = optimal_cylindrical_unwrap(template).apply(template)

    minimum = unwrapped_template.bounds(boundary=0)[0]
    unwrapped_template = Translation(-minimum).apply(unwrapped_template)
    unwrapped_template.points = unwrapped_template.points[:, [1, 0]]
    unwrapped_template.points[:, 0] = (
        unwrapped_template.points[:, 0].max() - unwrapped_template.points[:, 0])
    unwrapped_template.points *= np.array([.40, .31])
    unwrapped_template.points *= np.array([uv_shape])

    # Rasterize barycentric coordinates over the UV template; sampling the
    # input image at the corresponding image-space points yields the texture.
    bcoords_img, tri_index_img = rasterize_barycentric_coordinate_images(
        unwrapped_template, uv_shape)
    TI = tri_index_img.as_vector()
    BC = bcoords_img.as_vector(keep_channels=True).T

    def masked_texture(mesh_in_image, background):
        # Project each UV pixel's barycentric coordinates onto the mesh in
        # image space, then sample the background image at those 2D locations.
        sample_points_3d = mesh_in_image.project_barycentric_coordinates(
            BC, TI)
        texture = bcoords_img.from_vector(
            background.sample(sample_points_3d.points[:, :2]))
        return texture

    uv = masked_texture(mesh_in_img, image)

    # Assemble a textured mesh and export it alongside a render of the fit.
    t = TexturedTriMesh(
        result.final_mesh.points,
        image_coords_to_tcoords(uv.shape).apply(unwrapped_template).points, uv,
        mesh_in_img.trilist)

    m3io.export_textured_mesh(t,
                              str(expr_dir / Path(p).with_suffix('.mesh.obj')),
                              overwrite=True)
    mio.export_image(raster,
                     str(expr_dir / Path(p).with_suffix('.render.jpg')),
                     overwrite=True)
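
A hedged batch-processing sketch: `fit` writes '<stem>.mesh.obj' and '<stem>.render.jpg' next to each input image, so it can be driven over a directory of images (the directory path below is hypothetical).

from pathlib import Path

for image_path in sorted(Path('/data/faces').glob('*.jpg')):  # hypothetical directory
    fit(str(image_path))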