Example 1
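# Pull the intrinsic mean normals out of the model if they are present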
try:
    intrinsic_mean_normals = model['intrinsic_mean_normals']
except Exception:
    pass

# <codecell>

import numpy as np

from pybug.image import MaskedNDImage
from pybug.io import auto_import
from pybug.landmark import labeller, ibug_68_closed_mouth
from landmarks import ibug_68_edge

sfs_index = 2
bej = auto_import('/vol/atlas/databases/alex_images/bej*.ppm')
# Create a 4 channel image where each channel is the greyscale of an image
ground_truth_images = MaskedNDImage(
    np.concatenate([im.as_greyscale().pixels for im in bej], axis=2))
intensity_image = bej[sfs_index].as_greyscale()

intensity_image.landmarks = bej[0].landmarks
ground_truth_images.landmarks['PTS'] = bej[0].landmarks['PTS']

labeller([ground_truth_images, intensity_image], 'PTS', ibug_68_closed_mouth)
# labeller([ground_truth_images, intensity_image], 'PTS', ibug_68_edge)

lights = np.array([[0.5, 0.4, 2], [-0.5, 0.4, 2], [-0.5, -0.4, 2],
                   [0.5, -0.4, 2]])

# <codecell>

from pybug.transform.tps import TPS
from warp import build_similarity_transform
Example 2
        normal_model = model['appearance_model']
        reference_frame = model['template']
        mean_normals = model['mean_normals']
        try:
            intrinsic_mean_normals = model['intrinsic_mean_normals']
        except Exception:
            intrinsic_mean_normals = None

        # Estimate light direction for image
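        # Under a Lambertian model I ~= N . l for per-pixel normals N, so the
        # pseudoinverse of the mean normals gives a least-squares light estimate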
        I = intensity_image.as_vector()
        estimate_light = np.dot(pinv2(mean_normals), I)
        print(estimate_light)

        # Perform SFS
        warped_intensity_image = MaskedNDImage(intensity_image.pixels.copy(),
                                               mask=intensity_image.mask)
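        # Use the model's mean normals as the initial per-pixel normal estimate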
        initial_estimate_image = warped_intensity_image.from_vector(
            mean_normals.copy(), n_channels=3)

        mapping_object = build_mapping_object(feature_space,
                                              initial_estimate_image,
                                              intrinsic_mean_normals)
        # Normalise the image so that it has unit albedo?
        #warped_intensity_image.masked_pixels /= ground_truth_albedo.masked_pixels
        #warped_intensity_image.masked_pixels /= np.max(warped_intensity_image.masked_pixels)
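        # Refine the normals via shape-from-shading: 200 iterations starting
        # from the mean normals, constrained by the statistical normal model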
        reconstructed_normals = sfs(warped_intensity_image,
                                    initial_estimate_image, normal_model,
                                    estimate_light, n_iters=200,
                                    mapping_object=mapping_object)

        normals[subject_id][feature_space] = reconstructed_normals
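
The light estimate above solves the Lambertian relation I ≈ N · l in a
least-squares sense, where N stacks the per-pixel mean normals. A minimal,
self-contained sketch of just that step (illustrative only: the synthetic
arrays and the use of np.linalg.lstsq are assumptions, not the pybug API):

import numpy as np

# Synthetic per-pixel unit normals and a grey image rendered under a known
# directional light (Lambertian shading: I = N . l)
rng = np.random.RandomState(0)
normals = rng.normal(size=(1000, 3))
normals /= np.linalg.norm(normals, axis=1)[:, None]
true_light = np.array([0.5, 0.4, 2.0])
image = np.dot(normals, true_light)

# Least-squares light estimate, equivalent to np.dot(pinv2(N), I) above
estimated_light = np.linalg.lstsq(normals, image)[0]
print(estimated_light)  # recovers approximately [0.5, 0.4, 2.0]
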
Example 3
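# Standard deviation of the angular error for each subject / feature space pair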
std_angular_error_results = np.zeros([len(yaleb_subjects),
                                      len(feature_spaces)])

# Map each subject to a dict keyed by 'ground_truth' plus the 5 feature
# spaces, each entry holding the reconstructed normals (initially None)
normals = dict(zip(yaleb_subjects, [{} for _ in yaleb_subjects]))
for s in normals.values():
    s.update(zip(['ground_truth'] + feature_spaces,
                 [None] * (len(feature_spaces) + 1)))

for i, subject_id in enumerate(yaleb_subjects):
    print "Running experiment for {0}".format(subject_id)

    subject_images = [auto_import(os.path.join(yaleb_path, subject_id[-3:], light[0].format(subject_id)))[0]
                      for light in image_light_paths]
    # Create a 4 channel image where each channel is the greyscale of an image
    ground_truth_images = MaskedNDImage(
        np.concatenate([im.pixels
                        for im in subject_images], axis=2))

    # Choose the first image as the reconstruction candidate
    # (frontal illumination)
    intensity_image = deepcopy(subject_images[0])

    # Pass landmarks to all ground truth images
    ground_truth_images.landmarks['PTS'] = intensity_image.landmarks['PTS']

    # Label with correct labels
    labeller([ground_truth_images, intensity_image],
             'PTS', ibug_68_closed_mouth)

    # Constrain to mask
    ground_truth_images.constrain_mask_to_landmarks(