Code Example #1
File: benchmark.py  Project: Vagver/itwmm_evaluation
def calculate_errors_4DMaja_real(path_fits, path_gt, model):
    # Parameters:
    # --------------
    #  path_fits: 'string' of the directory that contains your reconstructed meshes.
    #             Mesh filenames should match the corresponding ground truth
    #             mesh filenames, plus the suffix of the mesh type
    #             (e.g. <frame_number>.obj, <frame_number>.ply)
    #  path_gt  : 'string' of the full path of the ground truth mesh.
    #  model    : 'string' of the model template you are using. Should be one of the
    #             following: 'LSFM', 'Basel', 'Surrey'
    # Returns:
    # --------------
    #  errors   : python list that contains the error per vertex between the ground
    #             truth mesh and a mean mesh calculated from your reconstructed meshes

    path_fits = Path(path_fits)
    path_gt = Path(path_gt)

    # load meshes' filenames
    filenames = [p.name for p in path_fits.glob('*')]
    filenames.sort()

    # load the gt_mesh (there is only one: Maja's neutral face)
    gt_mesh = m3io.import_mesh(path_gt, texture_resolver=None)
    gt_mesh.landmarks['ibug49'] = PointCloud(gt_mesh.points[lms_indexes][19:])

    errors = [0]

    # accumulate fits
    acc_points = np.zeros((gt_mesh.n_points, 3))
    for i, filename in enumerate(print_progress(filenames)):

        fit_3d = m3io.import_mesh(path_fits / filename, texture_resolver=None)
        if model == 'Surrey':
            lms = face_ibug_49_to_face_ibug_49(
                PointCloud(fit_3d.points[eos.load_eos_low_res_lm_index()]))
            fit_3d = eos.upsample_eos_low_res_to_fw_no_texture(fit_3d)
            fit_3d.landmarks['ibug49'] = lms
        elif model == 'LSFM' or model == 'Basel':
            fit_3d.landmarks['ibug49'] = PointCloud(
                fit_3d.points[lms_indexes][19:])
        else:
            print('Error: unsupported model template')
            return

        acc_points += fit_3d.points

    # create mean_fit_3d
    mean_fit_3d = TriMesh(acc_points / len(filenames), gt_mesh.trilist)
    mean_fit_3d.landmarks['ibug49'] = PointCloud(
        mean_fit_3d.points[lms_indexes][19:])

    # calculate per vertex errors between the neutral gt_mesh and the mean_fit_3d
    gt_mesh, eval_mask = landmark_and_mask_gt_mesh(gt_mesh, distance=1)
    errors[0], _, _ = mask_align_and_calculate_dense_error(
        mean_fit_3d, gt_mesh, eval_mask)

    return errors
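
A minimal usage sketch for the function above; the paths below are illustrative placeholders, not from the original project:

# Usage sketch (hypothetical paths; assumes the benchmark module above is in scope)
errors = calculate_errors_4DMaja_real('/data/fits_4DMaja/',
                                      '/data/gt/maja_neutral.ply',
                                      model='LSFM')
per_vertex_errors = errors[0]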
Code Example #2
def menpo3d_non_rigid_icp(fitted_obj, gt_obj, fitted_imp_3d_points,
                          gt_imp_3d_points, output_obj):
    from menpo3d.correspond import non_rigid_icp
    from menpo3d.io.output.base import export_mesh
    import menpo3d.io as m3io
    import menpo

    # landmark weights per non-rigid ICP iteration (commented lines are
    # alternatives that were tried)
    # lm_weights = [5, 2, .5, 0, 0, 0, 0, 0]  # default weights
    # lm_weights = [10, 8, 5, 3, 2, 0.5, 0, 0]
    lm_weights = [25, 20, 15, 10, 8, 5, 3, 1]
    # lm_weights = [2, 1, 0, 0, 0, 0, 0, 0]
    # lm_weights = [25, 20, 15, 10, 5, 2, 1, 0]
    # lm_weights = [100, 0, 0, 0, 0, 0, 0, 0]
    # lm_weights = [1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]

    stiff_weights = [50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2]  # default weights
    # stiff_weights = [50, 20, 15, 10, 3, 1, 0.35, 0.2]
    # stiff_weights = [50, 40, 30, 20, 10, 8, 5, 2]
    # stiff_weights = [50, 20, 10, 5, 2, 1, 0.5, 0.2]
    # stiff_weights = [1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]

    # load pointcloud surrey model as src
    src = m3io.import_mesh(fitted_obj)

    # load scan mesh as dest
    dest = m3io.import_mesh(gt_obj)
    #print('destination loaded')

    # add landmarks to mesh
    src.landmarks['myLM'] = menpo.shape.PointCloud(fitted_imp_3d_points)
    dest.landmarks['myLM'] = menpo.shape.PointCloud(gt_imp_3d_points)
    #print('landmarks loaded')

    # run non-rigid ICP to deform src towards dest
    result = non_rigid_icp(src,
                           dest,
                           eps=1e-3,
                           landmark_group='myLM',
                           stiffness_weights=stiff_weights,
                           data_weights=None,
                           landmark_weights=lm_weights,
                           generate_instances=False,
                           verbose=False)

    # export the result mesh
    export_mesh(result, output_obj, extension='.obj', overwrite=True)
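
A hedged usage sketch for the wrapper above; the file paths and landmark arrays are placeholders:

# Usage sketch (placeholder paths; landmark files hold one 'x y z' row per point)
import numpy as np
fitted_lms = np.loadtxt('fitted_landmarks.txt')  # (n_landmarks, 3)
gt_lms = np.loadtxt('gt_landmarks.txt')          # same ordering as fitted_lms
menpo3d_non_rigid_icp('fitted.obj', 'scan.obj', fitted_lms, gt_lms,
                      'registered.obj')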
Code Example #3
def calculate_errors_3dMDLab(path_fits, path_gt, model):
    r"""
    Parameters
    ----------
     path_fits: 'string' of the directory that contains your reconstructed
                meshes. Mesh filenames should match the corresponding
                image/ground truth mesh filenames, plus the suffix of the
                mesh type (e.g. .obj, .ply)
     path_gt: 'string' of the directory that contains the ground truth meshes.
     model: 'string' of the model template you are using. Should be one of the
            following: 'LSFM', 'Basel', 'Surrey'
    Returns
    -------
     errors: python list that contains the error per vertex for all meshes
    """
    path_fits = Path(path_fits)
    path_gt = Path(path_gt)

    # load meshes' filenames
    filenames = [p.name for p in path_fits.glob('*')]
    filenames.sort()

    errors = [0] * len(filenames)
    for i, filename in enumerate(print_progress(filenames)):

        # load gt_mesh, fit_3d
        gt_mesh = m3io.import_mesh(path_gt / filename, texture_resolver=None)
        gt_mesh.landmarks['ibug49'] = PointCloud(
            gt_mesh.points[lms_indexes][19:])
        fit_3d = m3io.import_mesh(path_fits / filename, texture_resolver=None)

        if model == 'Surrey':
            lms = face_ibug_49_to_face_ibug_49(
                PointCloud(fit_3d.points[load_eos_low_res_lm_index()]))
            fit_3d = upsample_eos_low_res_to_fw_no_texture(fit_3d)
            fit_3d.landmarks['ibug49'] = lms
        elif model == 'LSFM' or model == 'Basel':
            fit_3d.landmarks['ibug49'] = PointCloud(
                fit_3d.points[lms_indexes][19:])
        else:
            print('Error: unsupported model template')
            return

        # calculate per vertex errors
        gt_mesh, eval_mask = landmark_and_mask_gt_mesh(gt_mesh, distance=1)
        errors[i], _, _ = mask_align_and_calculate_dense_error(
            fit_3d, gt_mesh, eval_mask)

    return errors
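
The function returns one per-vertex error array per mesh, so a typical follow-up is to aggregate them; a sketch, assuming each entry is a flat numpy array and using hypothetical paths:

# Aggregation sketch (assumes numpy arrays of per-vertex errors)
import numpy as np
errors = calculate_errors_3dMDLab('/data/fits/', '/data/gt/', model='Basel')
all_errors = np.concatenate(errors)
print('mean per-vertex error: {:.4f}'.format(all_errors.mean()))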
Code Example #4
def test_custom_landmark_logic_bunny():
    def f(path):
        lmark_dict = {
            'no_nose': path.with_name('bunny_no_nose.ljson'),
            'full_set': path.with_name('bunny.ljson')
        }
        return menpo.io.input.resolve_from_paths(lmark_dict)

    mesh = mio.import_mesh(mio.data_path_to('bunny.obj'), landmark_resolver=f)
    assert ('no_nose' in mesh.landmarks.group_labels)
    lms = mesh.landmarks['no_nose']
    labels = {'reye', 'mouth', 'leye'}
    assert (len(set(lms.labels) - labels) == 0)
    assert_allclose(lms.with_labels('leye').points, bunny_leye, atol=1e-7)
    assert_allclose(lms.with_labels('reye').points, bunny_reye, atol=1e-7)
    assert_allclose(lms.with_labels('mouth').points, bunny_mouth, atol=1e-7)

    assert ('full_set' in mesh.landmarks.group_labels)
    lms = mesh.landmarks['full_set']
    labels = {'reye', 'mouth', 'nose', 'leye'}
    assert (len(set(lms.labels) - labels) == 0)
    assert_allclose(lms.with_labels('leye').points, bunny_leye, atol=1e-7)
    assert_allclose(lms.with_labels('reye').points, bunny_reye, atol=1e-7)
    assert_allclose(lms.with_labels('nose').points, bunny_nose, atol=1e-7)
    assert_allclose(lms.with_labels('mouth').points, bunny_mouth, atol=1e-7)
Code Example #5
def test_custom_landmark_logic_bunny():
    def f(path):
        lmark_dict = {
            "no_nose": path.with_name("bunny_no_nose.ljson"),
            "full_set": path.with_name("bunny.ljson"),
        }
        return menpo.io.input.resolve_from_paths(lmark_dict)

    mesh = mio.import_mesh(mio.data_path_to("bunny.obj"), landmark_resolver=f)
    assert "no_nose" in mesh.landmarks.group_labels
    lms = mesh.landmarks["no_nose"]
    labels = {"reye", "mouth", "leye"}
    assert len(set(lms.labels) - labels) == 0
    assert_allclose(lms.with_labels("leye").points, bunny_leye, atol=1e-7)
    assert_allclose(lms.with_labels("reye").points, bunny_reye, atol=1e-7)
    assert_allclose(lms.with_labels("mouth").points, bunny_mouth, atol=1e-7)

    assert "full_set" in mesh.landmarks.group_labels
    lms = mesh.landmarks["full_set"]
    labels = {"reye", "mouth", "nose", "leye"}
    assert len(set(lms.labels) - labels) == 0
    assert_allclose(lms.with_labels("leye").points, bunny_leye, atol=1e-7)
    assert_allclose(lms.with_labels("reye").points, bunny_reye, atol=1e-7)
    assert_allclose(lms.with_labels("nose").points, bunny_nose, atol=1e-7)
    assert_allclose(lms.with_labels("mouth").points, bunny_mouth, atol=1e-7)
Code Example #6
File: io.py  Project: redstorm-fyy/lsfm
def import_mesh(path):
    if path.suffix in ('.pkl', '.gz'):
        mesh = import_pickle(path)
    else:
        mesh = m3io.import_mesh(path)
    if hasattr(mesh, "texture") and mesh.texture.pixels.dtype != np.float64:
        mesh.texture.pixels = normalize_pixels_range(mesh.texture.pixels)
    return mesh
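
A usage sketch for this wrapper; it transparently loads pickled meshes and normalizes any texture to float64 pixels (the paths are illustrative):

# Usage sketch (illustrative paths)
from pathlib import Path
mesh_a = import_mesh(Path('/data/meshes/subject_01.pkl'))  # pickled mesh
mesh_b = import_mesh(Path('/data/meshes/subject_02.obj'))  # loaded via menpo3d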
Code Example #7
File: eos.py  Project: Vagver/itwmm_evaluation
def load_eos_output(image_path, output_dir):
    image_stem = Path(image_path).stem
    mesh = m3io.import_mesh(Path(output_dir) / (image_stem + '.obj'))

    lms = face_ibug_49_to_face_ibug_49(
        PointCloud(mesh.points[load_eos_low_res_lm_index()]))
    mesh_upsampled = upsample_eos_low_res_to_fw(mesh)
    mesh_upsampled.landmarks['ibug49'] = lms
    return {'raw_fit': mesh, 'corresponded_fit': mesh_upsampled}
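
A usage sketch; the function expects eos to have written <image_stem>.obj into output_dir, and it returns both the raw low-resolution fit and the upsampled, corresponded mesh (the paths are hypothetical):

# Usage sketch (hypothetical paths)
fits = load_eos_output('/data/images/frame_0001.jpg', '/data/eos_output/')
raw_fit = fits['raw_fit']            # low-resolution eos mesh
corr_fit = fits['corresponded_fit']  # upsampled mesh with 'ibug49' landmarks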
Code Example #8
File: data_provider.py  Project: trigeorgis/ibugnet
    def __init__(self, batch_size=1):
        super().__init__(batch_size)

        from menpo.transform import Translation, scale_about_centre
        import menpo3d.io as m3dio

        self.name = 'LFPW'
        self.batch_size = batch_size
        self.root = Path('/vol/atlas/databases/lfpw/trainset')
        template = m3dio.import_mesh(
            '/vol/construct3dmm/regression/src/template.obj')
        template = Translation(-template.centre()).apply(template)
        self.template = scale_about_centre(template, 1. /
                                           1000.).apply(template)
        self.image_extension = '.png'
        self.lms_extension = '.pts'
Code Example #9
File: data_provider.py  Project: trigeorgis/ibugnet
    def __init__(self, batch_size=1):
        super().__init__(batch_size)
        from menpo.transform import Translation, scale_about_centre
        import menpo3d.io as m3dio

        self.name = 'FDDB'
        self.root = Path('/vol/atlas/databases/fddb_ibug')
        template = m3dio.import_mesh(
            '/vol/construct3dmm/regression/src/template.obj')
        template = Translation(-template.centre()).apply(template)
        self.template = scale_about_centre(template, 1. /
                                           1000.).apply(template)
        pca_path = '/homes/gt108/Projects/ibugface/pose_settings/pca_params.pkl'
        self.eigenvectors, self.eigenvalues, self.h_mean, self.h_max = mio.import_pickle(
            pca_path)
        self.image_extension = '.jpg'
        self.lms_extension = '.ljson'
Code Example #10
File: io_input_test.py  Project: lydonchandra/menpo3d
def test_custom_landmark_logic_bunny():
    def f(mesh):
        return {"no_nose": mesh.path.with_name("bunny_no_nose.ljson"), "full_set": mesh.path.with_name("bunny.ljson")}

    mesh = mio.import_mesh(mio.data_path_to("bunny.obj"), landmark_resolver=f)
    assert "no_nose" in mesh.landmarks.group_labels
    lms = mesh.landmarks["no_nose"]
    labels = {"reye", "mouth", "leye"}
    assert len(set(lms.labels) - labels) == 0
    assert_allclose(lms["leye"].points, bunny_leye, atol=1e-7)
    assert_allclose(lms["reye"].points, bunny_reye, atol=1e-7)
    assert_allclose(lms["mouth"].points, bunny_mouth, atol=1e-7)

    assert "full_set" in mesh.landmarks.group_labels
    lms = mesh.landmarks["full_set"]
    labels = {"reye", "mouth", "nose", "leye"}
    assert len(set(lms.labels) - labels) == 0
    assert_allclose(lms["leye"].points, bunny_leye, atol=1e-7)
    assert_allclose(lms["reye"].points, bunny_reye, atol=1e-7)
    assert_allclose(lms["nose"].points, bunny_nose, atol=1e-7)
    assert_allclose(lms["mouth"].points, bunny_mouth, atol=1e-7)
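
Note that in this example the resolver receives the imported mesh and uses mesh.path, while Examples #4, #5 and #12 receive the path directly; the expected signature depends on the menpo3d version. A minimal custom resolver under the mesh-based convention (the 'face' label is illustrative):

# Minimal resolver sketch (mesh-based signature, as in this example)
def my_resolver(mesh):
    # map a landmark group label to an .ljson file next to the mesh
    return {'face': mesh.path.with_suffix('.ljson')}

mesh = mio.import_mesh(mio.data_path_to('bunny.obj'), landmark_resolver=my_resolver)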
Code Example #11
def import_mesh(path, hasTexture=False, landmark_type='ibug68'):
    if path.suffix in ('.pkl', '.gz'):
        mesh = import_pickle(path)
    else:
        mesh = m3io.import_mesh(path)
    if hasTexture:
        if mesh.texture.pixels.dtype != np.float64:
            mesh.texture.pixels = normalize_pixels_range(mesh.texture.pixels)
    else:
        # parse the accompanying .landmark file (one 'x y z' row per point)
        landmark = []
        with open(str(path) + '.landmark') as pp_file:
            reader = csv.reader(pp_file, delimiter=' ')
            for count, row in enumerate(reader, start=1):
                # keep only the rows belonging to the requested landmark scheme
                if ((landmark_type == 'ibug100' and 1 <= count <= 100) or
                        (landmark_type == 'ibug68' and count < 69) or
                        (landmark_type == 'ibugEar' and
                         (78 <= count <= 88 or 90 <= count <= 100))):
                    landmark.append(
                        [float(row[0]), float(row[1]), float(row[2])])
        mesh.landmarks[landmark_type] = PointCloud(np.array(landmark))
    return mesh
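
A usage sketch; the loader expects a '<mesh path>.landmark' text file next to the mesh with one space-separated x y z triplet per line (the path is illustrative):

# Usage sketch (illustrative path; expects '/data/scan_01.ply.landmark' to exist)
from pathlib import Path
mesh = import_mesh(Path('/data/scan_01.ply'), hasTexture=False,
                   landmark_type='ibug68')
print(mesh.landmarks['ibug68'].n_points)  # 68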
Code Example #12
File: io_input_test.py  Project: Loubnar/menpo3d
def test_custom_landmark_logic_bunny():
    def f(path):
        return {
            'no_nose': path.with_name('bunny_no_nose.ljson'),
            'full_set': path.with_name('bunny.ljson')
        }
    mesh = mio.import_mesh(mio.data_path_to('bunny.obj'), landmark_resolver=f)
    assert('no_nose' in mesh.landmarks.group_labels)
    lms = mesh.landmarks['no_nose']
    labels = {'reye', 'mouth', 'leye'}
    assert(len(set(lms.labels) - labels) == 0)
    assert_allclose(lms['leye'].points, bunny_leye, atol=1e-7)
    assert_allclose(lms['reye'].points, bunny_reye, atol=1e-7)
    assert_allclose(lms['mouth'].points, bunny_mouth, atol=1e-7)

    assert('full_set' in mesh.landmarks.group_labels)
    lms = mesh.landmarks['full_set']
    labels = {'reye', 'mouth', 'nose', 'leye'}
    assert(len(set(lms.labels) - labels) == 0)
    assert_allclose(lms['leye'].points, bunny_leye, atol=1e-7)
    assert_allclose(lms['reye'].points, bunny_reye, atol=1e-7)
    assert_allclose(lms['nose'].points, bunny_nose, atol=1e-7)
    assert_allclose(lms['mouth'].points, bunny_mouth, atol=1e-7)
Code Example #13
File: io_input_test.py  Project: yuxiang-zhou/menpo3d
def test_custom_landmark_logic_bunny():
    def f(mesh):
        return {
            'no_nose': mesh.path.with_name('bunny_no_nose.ljson'),
            'full_set': mesh.path.with_name('bunny.ljson')
        }

    mesh = mio.import_mesh(mio.data_path_to('bunny.obj'), landmark_resolver=f)
    assert ('no_nose' in mesh.landmarks.group_labels)
    lms = mesh.landmarks['no_nose']
    labels = {'reye', 'mouth', 'leye'}
    assert (len(set(lms.labels) - labels) == 0)
    assert_allclose(lms['leye'].points, bunny_leye, atol=1e-7)
    assert_allclose(lms['reye'].points, bunny_reye, atol=1e-7)
    assert_allclose(lms['mouth'].points, bunny_mouth, atol=1e-7)

    assert ('full_set' in mesh.landmarks.group_labels)
    lms = mesh.landmarks['full_set']
    labels = {'reye', 'mouth', 'nose', 'leye'}
    assert (len(set(lms.labels) - labels) == 0)
    assert_allclose(lms['leye'].points, bunny_leye, atol=1e-7)
    assert_allclose(lms['reye'].points, bunny_reye, atol=1e-7)
    assert_allclose(lms['nose'].points, bunny_nose, atol=1e-7)
    assert_allclose(lms['mouth'].points, bunny_mouth, atol=1e-7)
Code Example #14
File: io_input_test.py  Project: trigeorgis/menpo3d
def test_custom_landmark_logic_None_bunny():
    def f(mesh):
        return None

    mesh = mio.import_mesh(mio.data_path_to('bunny.obj'), landmark_resolver=f)
    assert (mesh.landmarks.n_groups == 0)
Code Example #15
File: io_input_test.py  Project: Loubnar/menpo3d
def test_custom_landmark_logic_None_bunny():
    def f(mesh):
        return None
    mesh = mio.import_mesh(mio.data_path_to('bunny.obj'), landmark_resolver=f)
    assert(mesh.landmarks.n_groups == 0)
Code Example #16
def main():

    test_data_directory = './test_data_face/render'

    # load obj
    face_mesh = m3io.import_mesh('./test_data_face/mesh.obj')

    texture_index = (face_mesh.tcoords.points[:, ::-1] *
                     face_mesh.texture.shape).astype(np.int32)

    vertex_color = face_mesh.texture.pixels[:, 1 - texture_index[:, 0],
                                            texture_index[:, 1]].T

    tf.reset_default_graph()
    # Set up the face mesh, approximating vertex normals by the normalized
    # vertex positions (reasonable only for meshes roughly centred at the
    # origin):
    face_vertices = tf.constant(face_mesh.points, dtype=tf.float32)
    face_normals = tf.nn.l2_normalize(face_vertices, dim=1)
    face_triangles = tf.constant(face_mesh.trilist, dtype=tf.int32)

    # Render the mesh from random viewpoints; this exercises the full forward
    # pass of both the custom kernel and the python wrapper.
    n_rendering = 16

    model_transforms = camera_utils.euler_matrices(
        tf.random_uniform([n_rendering, 3]) * np.pi / 2 -
        np.pi / 4.)[:, :3, :3]

    vertices_world_space = tf.matmul(tf.stack(
        [face_vertices for _ in range(n_rendering)]),
                                     model_transforms,
                                     transpose_b=True)

    normals_world_space = tf.matmul(tf.stack(
        [face_normals for _ in range(n_rendering)]),
                                    model_transforms,
                                    transpose_b=True)

    # camera position:
    eye = tf.constant(n_rendering * [[0.0, 0.0, 6.0]], dtype=tf.float32)
    center = tf.constant(n_rendering * [[0.0, 0.0, 0.0]], dtype=tf.float32)
    world_up = tf.constant(n_rendering * [[0.0, 1.0, 0.0]], dtype=tf.float32)
    ambient_colors = tf.constant(n_rendering * [[0.2, 0.2, 0.2]],
                                 dtype=tf.float32)
    image_width = 256
    image_height = 256
    light_positions = tf.constant(n_rendering *
                                  [[[6.0, 6.0, 6.0], [-6.0, -6.0, 6.0]]])
    light_intensities = tf.ones([n_rendering, 1, 3], dtype=tf.float32)
    vertex_diffuse_colors = tf.constant(np.stack(
        [vertex_color for _ in range(n_rendering)]),
                                        dtype=tf.float32)

    rendered = mesh_renderer.mesh_renderer(
        vertices_world_space,
        triangles=face_triangles,
        normals=normals_world_space,
        diffuse_colors=vertex_diffuse_colors,
        camera_position=eye,
        camera_lookat=center,
        camera_up=world_up,
        light_positions=light_positions,
        light_intensities=light_intensities,
        image_width=image_width,
        image_height=image_height,
        ambient_color=ambient_colors)

    image_id = 0
    with tf.Session() as sess:
        fps_list = []
        while image_id < 100:
            start_time = time.time()
            images = sess.run(rendered, feed_dict={})
            for image in images:
                target_image_name = 'Gray_face_%i.png' % image_id
                image_id += 1
                baseline_image_path = os.path.join(test_data_directory,
                                                   target_image_name)

                mio.export_image(Image.init_from_channels_at_back(
                    image[..., :3].clip(0, 1)),
                                 baseline_image_path,
                                 overwrite=True)

            end_time = time.time()
            fps = n_rendering / (end_time - start_time)
            fps_list.append(fps)
            if len(fps_list) > 5:
                fps_list.pop(0)
            print(np.mean(fps_list))
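
The script approximates vertex normals by L2-normalizing vertex positions, which is only reasonable for geometry roughly centred at the origin. A sketch of using menpo's triangle-based per-vertex normals instead (assumes TriMesh.vertex_normals() is available in the installed menpo version):

# Alternative normals sketch (assumes menpo's TriMesh.vertex_normals() exists)
import tensorflow as tf
import menpo3d.io as m3io

face_mesh = m3io.import_mesh('./test_data_face/mesh.obj')
face_normals = tf.constant(face_mesh.vertex_normals(), dtype=tf.float32)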
Code Example #17
def main():
    # hyperparameters
    BATCH_SIZE = FLAGS.batch_size
    N_VERTICES = 5023
    EMBEDING = 8
    INPUT_CHANNEL = 6 if FLAGS.ref_model == 'lsfm_rgb' or FLAGS.ref_model.startswith(
        'mein3d') else 3
    LR = FLAGS.lr
    FILTERS = [16, 16, 16, 32]
    LOGDIR = FLAGS.logdir if 'model_' in FLAGS.logdir else "{}/model_{}".format(
        FLAGS.logdir, int(time.time()))

    # reference model
    if FLAGS.ref_model.startswith('lsfm') or FLAGS.ref_model == 'mein3d':
        N_VERTICES = 53215
        shape_model = mio.import_pickle(FLAGS.meta_path + '/all_all_all.pkl')
        trilist = shape_model.instance([]).trilist
        EMBEDING = 128
        FILTERS = [16, 32, 32, 64]

    elif FLAGS.ref_model == 'mein3dcrop':
        N_VERTICES = 28431
        face_mean_crop = m3io.import_mesh(FLAGS.meta_path +
                                          '/face_mean_mesh_crop.obj')
        trilist = face_mean_crop.trilist
        EMBEDING = 256
        FILTERS = [16, 32, 32, 64]

    elif FLAGS.ref_model == 'mein3dcropmesh':
        N_VERTICES = 28431
        face_mean_crop = m3io.import_mesh(FLAGS.meta_path +
                                          '/face_mean_mesh_crop.obj')
        trilist = face_mean_crop.trilist
        EMBEDING = 128
        FILTERS = [16, 32, 32, 64]
        INPUT_CHANNEL = 3

    elif FLAGS.ref_model == 'coma':
        N_VERTICES = 5023
        trilist = mio.import_pickle(FLAGS.meta_path + '/coma_f.pkl',
                                    encoding='latin1')

    elif FLAGS.ref_model == '4dfab':
        N_VERTICES = 2064
        template_mesh = m3io.import_mesh(
            '/vol/atlas/homes/Shiyang/CVPR18/code/animation/COMA/coma/data/afm_l1_cropped_final.obj'
        )
        trilist = template_mesh.trilist

    else:
        raise Exception('Undefined ref_model: {}'.format(FLAGS.ref_model))

    graph_laplacians, downsampling_matrices, upsamling_matrices, adj_matrices = mio.import_pickle(
        FLAGS.meta_path + '/{}_LDUA.pkl'.format(FLAGS.ref_model),
        encoding='latin1')

    def build_data():
        class MeshRandomSample(dm.utils.Sequence):
            def __init__(self, batch_size=BATCH_SIZE):
                self.batch_size = batch_size
                self.size = 10000 // BATCH_SIZE
                super().__init__()

            def __len__(self):
                return self.size

            def __getitem__(self, idx):
                batch_sample_mesh = np.array([
                    shape_model.instance(np.random.sample([50]) * 3,
                                         normalized_weights=True).points
                    for _ in range(self.batch_size)
                ])

                return [batch_sample_mesh], [batch_sample_mesh]

        class NumpyMesh(dm.utils.Sequence):
            def __init__(self,
                         fp,
                         batch_size=BATCH_SIZE,
                         scale=1,
                         normalize=False):
                self.train_mesh = np.load(fp)
                if normalize:
                    self.train_mesh /= np.max(
                        np.abs([self.train_mesh.max(),
                                self.train_mesh.min()])) + 1
                self.batch_size = batch_size
                self.size = self.train_mesh.shape[0]
                self.indexes = list(range(self.size))
                self.scale = scale
                np.random.shuffle(self.indexes)
                super().__init__()

            def __len__(self):
                return self.size // self.batch_size

            def __getitem__(self, idx):
                indexes = self.indexes[idx * self.batch_size:(idx + 1) *
                                       self.batch_size]
                batch_sample_mesh = np.array(
                    [self.train_mesh[i] * self.scale for i in indexes])

                return [batch_sample_mesh], [batch_sample_mesh]

            def on_epoch_end(self, *args, **kwargs):
                np.random.shuffle(self.indexes)
                return super().on_epoch_end()

        class H5Mesh(dm.utils.Sequence):
            def __init__(self,
                         fp,
                         dataset,
                         batch_size=BATCH_SIZE,
                         with_rgb=True):
                self.train_mesh = h5py.File(fp, 'r')[dataset]
                self.batch_size = batch_size
                self.size = self.train_mesh.len()
                self.indexes = list(range(self.size))
                self.with_rgb = with_rgb
                np.random.shuffle(self.indexes)
                super().__init__()

            def __len__(self):
                return self.size // self.batch_size

            def __getitem__(self, idx):
                batch_sample_mesh = np.array([
                    self.train_mesh[self.indexes[i]]
                    for i in range(idx *
                                   self.batch_size, idx * self.batch_size +
                                   self.batch_size)
                ])

                if not self.with_rgb:
                    batch_sample_mesh = batch_sample_mesh[..., :3]

                return [batch_sample_mesh], [batch_sample_mesh]

            def on_epoch_end(self, *args, **kwargs):
                np.random.shuffle(self.indexes)
                return super().on_epoch_end()

        if FLAGS.ref_model == 'lsfm':
            return MeshRandomSample(batch_size=BATCH_SIZE)

        elif FLAGS.ref_model == 'lsfm_rgb':
            return H5Mesh(FLAGS.meta_path + '/../lsfm_texture_train.h5',
                          'lsfm_colour',
                          batch_size=BATCH_SIZE)

        elif FLAGS.ref_model == 'mein3d':
            return H5Mesh(FLAGS.meta_path + '/../mein3d.h5',
                          'colour_mesh',
                          batch_size=BATCH_SIZE)

        elif FLAGS.ref_model == 'mein3dcrop':
            return H5Mesh(FLAGS.meta_path + '/../mein3dcrop.h5',
                          'colour_mesh',
                          batch_size=BATCH_SIZE)

        elif FLAGS.ref_model == 'mein3dcropmesh':
            return H5Mesh(FLAGS.meta_path + '/../mein3dcropmesh.h5',
                          'mesh',
                          batch_size=BATCH_SIZE)

        elif FLAGS.ref_model == 'coma':
            return NumpyMesh(
                '/homes/yz4009/wd/gitdev/coma/data/bareteeth/train.npy',
                batch_size=BATCH_SIZE,
                scale=5)

        elif FLAGS.ref_model == '4dfab':
            return NumpyMesh(
                '/vol/atlas/homes/Shiyang/CVPR18/code/animation/COMA/coma/data/RECON4DFAB/AFM/train.npy',
                batch_size=BATCH_SIZE,
                normalize=True)

        return None

    def build_model(inputs_channels=3):
        input_mesh = dm.layers.Input(shape=[N_VERTICES, inputs_channels],
                                     name='input_mesh')

        mesh_embedding = dm.networks.MeshEncoder(input_mesh,
                                                 EMBEDING,
                                                 graph_laplacians,
                                                 downsampling_matrices,
                                                 filter_list=FILTERS)
        output_mesh = dm.networks.MeshDecoder(mesh_embedding,
                                              inputs_channels,
                                              graph_laplacians,
                                              adj_matrices,
                                              upsamling_matrices,
                                              polynomial_order=6,
                                              filter_list=FILTERS)

        # wrapping input and output
        mesh_ae = dm.DeepMachine(inputs=input_mesh, outputs=[output_mesh])

        n_gpu = len(FLAGS.gpu.split(','))
        if n_gpu > 1:
            mesh_ae = multi_gpu_model(mesh_ae, gpus=n_gpu)

        # compile model with optimizer
        mesh_ae.compile(optimizer=dm.optimizers.Adam(lr=LR), loss=['mae'])

        # ---------------- rendering layer ------------
        mesh_to_render = dm.layers.Input(shape=[N_VERTICES, 3],
                                         name='mesh_to_render')
        mesh_to_render.set_shape([BATCH_SIZE, N_VERTICES, 3])
        vertex_color = dm.layers.Input(shape=[N_VERTICES, 3],
                                       name='vertex_color')
        vertex_color.set_shape([BATCH_SIZE, N_VERTICES, 3])

        # Build vertices and normals
        mesh_vertices = mesh_to_render
        mesh_vertices.set_shape([BATCH_SIZE, N_VERTICES, 3])
        mesh_normals = tf.nn.l2_normalize(mesh_vertices, axis=2)
        mesh_normals.set_shape([BATCH_SIZE, N_VERTICES, 3])

        # rendering output
        mesh_triangles = tf.constant(trilist, dtype=tf.int32)

        # camera position:
        eye = tf.constant(BATCH_SIZE * [[0.0, 0.0, -2.0]], dtype=tf.float32)
        center = tf.constant(BATCH_SIZE * [[0.0, 0.0, 0.0]], dtype=tf.float32)
        world_up = tf.constant(BATCH_SIZE * [[1.0, 0.0, 0.0]],
                               dtype=tf.float32)
        ambient_colors = tf.constant(BATCH_SIZE * [[1., 1., 1.]],
                                     dtype=tf.float32) * 0.1
        light_positions = tf.constant(BATCH_SIZE * [[[2.0, 2.0, 2.0]]]) * 3.
        light_intensities = tf.ones([BATCH_SIZE, 1, 3], dtype=tf.float32)

        render_mesh = dm.layers.Renderer(
            # image size
            image_width=256,
            image_height=256,
            # mesh definition
            triangles=mesh_triangles,
            normals=mesh_normals,
            # colour definition
            diffuse_colors=vertex_color,
            ambient_color=ambient_colors,
            # camera definition
            camera_position=eye,
            camera_lookat=center,
            camera_up=world_up,
            # light definition
            light_positions=light_positions,
            light_intensities=light_intensities,
        )(mesh_vertices)

        mesh_render = dm.DeepMachine(inputs=[mesh_to_render, vertex_color],
                                     outputs=[render_mesh])
        # ----------------------

        return mesh_ae, mesh_render

    def custom_summary(train_x, train_y, predict_y, epoch):
        def render_mesh(sample_mesh, res=256, scale=1):

            if sample_mesh.shape[-1] != 3:
                sample_colours = sample_mesh[..., 3:]
            else:
                sample_colours = np.ones_like(sample_mesh) * [0, 0, 1]
            sample_mesh = sample_mesh[..., :3]

            sample_mesh = Homogeneous(
                dm.utils.rotation_matrix(np.deg2rad(90),
                                         [0, 0, -1])).apply(sample_mesh)
            sample_mesh = ColouredTriMesh(sample_mesh * scale * res / 2 +
                                          res / 2,
                                          trilist=trilist,
                                          colours=sample_colours)
            sample_mesh = lambertian_shading(sample_mesh, ambient_colour=0)
            store_path = Path(LOGDIR) / str(epoch)
            if not store_path.exists():
                store_path.mkdir()

            m3io.export_mesh(sample_mesh,
                             store_path / '{}.obj'.format(time.time()))

            mesh_img = rasterize_mesh(sample_mesh, [res, res])
            mesh_img = mesh_img.rotate_ccw_about_centre(180)

            return mesh_img.pixels_with_channels_at_back()

        def dr_render_mesh(sample_mesh):
            if sample_mesh.shape[-1] != 3:
                sample_colours = sample_mesh[..., 3:]
            else:
                sample_colours = np.ones_like(sample_mesh) * [0, 0, 1]
            sample_mesh = sample_mesh[..., :3]

            return renderer.predict([sample_mesh, sample_colours])[..., :3]

        return {
            'target/mesh': np.array(list(map(render_mesh, train_y[0][:4]))),
            'output/mesh': np.array(list(map(render_mesh, predict_y[0][:4]))),
            'target/dr_mesh': dr_render_mesh(train_y[0]),
            'output/dr_mesh': dr_render_mesh(predict_y[0]),
        }

    training_generator = build_data()
    auto_encoder, renderer = build_model(inputs_channels=INPUT_CHANNEL)

    results = auto_encoder.fit(training_generator,
                               epochs=1000,
                               lr_decay=FLAGS.lr_decay,
                               logdir=LOGDIR,
                               verbose=2,
                               workers=FLAGS.no_thread,
                               summary_ops=[custom_summary])
Code Example #18
# lm_weights = [5, 2, .5, 0, 0, 0, 0, 0]  # default weights
# lm_weights = [10, 8, 5, 3, 2, 0.5, 0, 0]
lm_weights = [25, 20, 15, 10, 8, 5, 3, 1]
# lm_weights = [2, 1, 0, 0, 0, 0, 0, 0]
# lm_weights = [25, 20, 15, 10, 5, 2, 1, 0]
# lm_weights = [100, 0, 0, 0, 0, 0, 0, 0]
# lm_weights = [1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]

stiff_weights = [50, 20, 5, 2, 0.8, 0.5, 0.35, 0.2]  # default weights
# stiff_weights = [50, 20, 15, 10, 3, 1, 0.35, 0.2]
# stiff_weights = [50, 40, 30, 20, 10, 8, 5, 2]
# stiff_weights = [50, 20, 10, 5, 2, 1, 0.5, 0.2]
# stiff_weights = [1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000]

# load pointcloud surrey model as src
src = m3io.import_mesh(src_path)
print('source loaded')


# load scan mesh as dest
#dest = m3io.import_mesh(dest_path)
#print('destination loaded')

# load landmark pointclouds (np.loadtxt accepts a file path directly, so no
# file handles need to be opened and left unclosed)
src_lm = np.loadtxt(src_lm_path)
#dest_lm = np.loadtxt(dest_lm_path)

print(src_lm)
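
A possible continuation, mirroring Example #2 (a sketch, not part of the original script): wrap the loaded landmarks as menpo PointClouds and attach them to the meshes before running non-rigid ICP.

# Continuation sketch (mirrors Example #2; not in the original script)
import menpo
src.landmarks['myLM'] = menpo.shape.PointCloud(src_lm)
#dest.landmarks['myLM'] = menpo.shape.PointCloud(dest_lm)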
Code Example #19
def main():
    dm.K.clear_session()
    dm.K.set_learning_phase(1)  # set learning phase

    # hyperparameters
    BATCH_SIZE = FLAGS.batch_size
    LR = FLAGS.lr
    LOGDIR = FLAGS.logdir if 'model_' in FLAGS.logdir else "{}/model_{}".format(
        FLAGS.logdir, int(time.time()))
    N_VERTICES = 28431
    EMBEDING = 128
    CAMERA_PARAM = 12
    INPUT_SHAPE = 112
    FILTERS = [16, 32, 32, 64]

    # global constants
    n_gpu = len(FLAGS.gpu.split(','))
    face_mean_crop = m3io.import_mesh(FLAGS.meta_path + '/face_mean_mesh_crop.obj')
    trilist = face_mean_crop.trilist
    graph_laplacians, downsampling_matrices, upsamling_matrices, adj_matrices = mio.import_pickle(
        FLAGS.meta_path + '/mein3dcrop_LDUA.pkl', encoding='latin1')

    def build_data():
        
        class H5Mesh(dm.utils.Sequence):

            def __init__(self, fp, dataset, batch_size=BATCH_SIZE):
                self.train_mesh = h5py.File(fp, 'r')[dataset]
                self.batch_size = batch_size
                self.size = self.train_mesh.len()
                self.indexes = list(range(self.size))
                np.random.shuffle(self.indexes)
                super().__init__()

            def __len__(self):
                return self.size // self.batch_size

            def __getitem__(self, idx):
                indexes = self.indexes[idx * self.batch_size: (idx + 1) * self.batch_size]
                batch_sample_mesh = np.array([
                    self.train_mesh[i] for i in indexes
                ])

                return [batch_sample_mesh], [batch_sample_mesh]

            def on_epoch_end(self, *args, **kwargs):
                np.random.shuffle(self.indexes)
                return super().on_epoch_end()

        class ImageSequence(dm.utils.Sequence):

            def __init__(self, dirpath, batch_size=BATCH_SIZE):
                self.detection = pd.read_csv('/homes/yz4009/db/face/loose_landmark_test.csv')
                self.size = self.detection.shape[0]
                self.image_path = Path(dirpath)
                self.batch_size = batch_size
                self.indexes = list(range(self.size))
                np.random.shuffle(self.indexes)

            def on_epoch_end(self, *args, **kwargs):
                np.random.shuffle(self.indexes)

            def __len__(self):
                return self.size // self.batch_size

            def _preprocess(self, idx):
                name, *lms5pt = self.detection.loc[idx]
                lms5pt = PointCloud(np.array(lms5pt).reshape([-1,2])[:,::-1])
                img = mio.import_image((self.image_path/name).with_suffix('.jpg'))
                cimg, _, _ = dm.utils.crop_image_bounding_box(img, lms5pt.bounding_box(), [112, 112], base=186)

                return cimg.pixels_with_channels_at_back() * 2 - 1

            def __getitem__(self, idx):
                image_indexes = self.indexes[
                    idx * self.batch_size: (idx + 1) * self.batch_size]

                batch_img = [self._preprocess(i) for i in image_indexes]

                return [np.array(batch_img)], [np.array(batch_img)]
        
        return H5Mesh('/homes/yz4009/wd/gitdev/coma/data/mein3dcrop.h5', 'colour_mesh', batch_size=BATCH_SIZE), ImageSequence(FLAGS.dataset_path)

    def build_model(inputs_channels=6, n_gpu=n_gpu):

        # define components
        ## image encoder
        def build_img_encoder():
            input_img = dm.layers.Input(shape=[INPUT_SHAPE, INPUT_SHAPE, 3], name='input_img')

            img_embedding = dm.networks.Encoder2D(
                input_img, EMBEDING + CAMERA_PARAM, depth=4, nf=64)
            mesh_rec_embeding = dm.layers.Lambda(lambda x: x[..., :EMBEDING])(img_embedding)
            cam_rec_embeding = dm.layers.Lambda(lambda x: dm.K.tanh(x[..., EMBEDING:]) * 3)(img_embedding)

            return dm.Model(input_img, [mesh_rec_embeding, cam_rec_embeding], name='image_encoder')

        ## mesh encoder
        def build_mesh_encoder():
            input_mesh = dm.layers.Input(shape=[N_VERTICES, inputs_channels], name='input_mesh')
            mesh_embedding = dm.networks.MeshEncoder(
                input_mesh, EMBEDING, graph_laplacians, downsampling_matrices, filter_list=FILTERS)

            return dm.Model(input_mesh, mesh_embedding, name='mesh_encoder')

        ## common decoder
        def build_decoder():
            input_embeding = dm.layers.Input(shape=[EMBEDING], name='input_embeding')
            output_mesh = dm.networks.MeshDecoder(
                input_embeding, 
                inputs_channels, 
                graph_laplacians, 
                adj_matrices, 
                upsamling_matrices, 
                polynomial_order=6, 
                filter_list=FILTERS)

            return dm.Model(input_embeding, output_mesh, name='decoder')

        ## renderer
        def build_renderer(mesh_vertices, vertex_color, cam_parameter):
            # mesh_vertices = dm.layers.Input(shape=[N_VERTICES, 3], name='mesh_vertices')
            mesh_vertices.set_shape([BATCH_SIZE, N_VERTICES, 3])
            # vertex_color = dm.layers.Input(shape=[N_VERTICES, 3], name='vertex_color')
            vertex_color.set_shape([BATCH_SIZE, N_VERTICES, 3])
            # cam_parameter = dm.layers.Input(shape=[CAMERA_PARAM], name='cam_parameter')
            cam_parameter.set_shape([BATCH_SIZE, CAMERA_PARAM])

            # Build vertices and normals
            mesh_normals = tf.nn.l2_normalize(mesh_vertices, axis=2)

            # rendering output
            mesh_triangles = tf.constant(trilist, dtype=tf.int32)
            
            # camera position:
            eye = cam_parameter[...,:3]
            center = cam_parameter[...,3:6]
            world_up = cam_parameter[...,6:9]
            light_positions = cam_parameter[:,None,9:12]

            ambient_colors = tf.ones([BATCH_SIZE, 3], dtype=tf.float32) * 0.1
            light_intensities = tf.ones([BATCH_SIZE, 1, 3], dtype=tf.float32)

            render_mesh = dm.layers.Renderer(
                # image size
                image_width=INPUT_SHAPE,
                image_height=INPUT_SHAPE,
                # mesh definition
                triangles=mesh_triangles,
                normals=mesh_normals,
                # colour definition
                diffuse_colors=vertex_color,
                ambient_color=ambient_colors,
                # camera definition
                camera_position=eye,
                camera_lookat=center,
                camera_up=world_up,
                # light definition
                light_positions=light_positions,
                light_intensities=light_intensities,
            )(mesh_vertices)

            render_mesh = dm.layers.Lambda(lambda x: x[..., :3])(render_mesh)

            return render_mesh

        # Mesh AE stream
        ## define inputs
        input_mesh_stream = dm.layers.Input(shape=[N_VERTICES, 6], name='input_mesh_stream')

        ## define components
        mesh_encoder_model = build_mesh_encoder()
        decoder_model = build_decoder()

        ## define connections
        output_mesh = decoder_model(mesh_encoder_model(input_mesh_stream))
        mesh_ae_model = dm.DeepMachine(
            inputs=input_mesh_stream, 
            outputs=output_mesh,
            name='MeshStream'
        )

        ## multi gpu support
        if n_gpu > 1:
            mesh_ae_model = multi_gpu_model(mesh_ae_model, gpus=n_gpu)

        ## compile mesh stream
        mesh_ae_model.compile(
            optimizer=dm.optimizers.Adam(lr=LR),
            loss=['mae']
        )

        ## set trainable
        mesh_ae_model.trainable = False
        decoder_model.trainable = False
        mesh_encoder_model.trainable = False

        # Render Stream
        ## define inputs
        input_image_stream = dm.layers.Input(shape=[INPUT_SHAPE, INPUT_SHAPE, 3], name='input_image_stream')

        ## define components
        img_encoder_model = build_img_encoder()

        ## define connections
        rec_mesh_emb, rec_cam_emb = img_encoder_model(input_image_stream)
        mesh_with_colour = decoder_model(rec_mesh_emb)
        
        mesh_vert = dm.layers.Lambda(lambda x: x[..., :3])(mesh_with_colour)
        mesh_vert.set_shape([BATCH_SIZE, N_VERTICES, 3])
        mesh_colour = dm.layers.Lambda(lambda x: x[..., 3:])(mesh_with_colour)
        mesh_colour.set_shape([BATCH_SIZE, N_VERTICES, 3])
        rec_render = build_renderer(
            mesh_vert,
            mesh_colour,
            rec_cam_emb
        )

        render_model = dm.DeepMachine(
            inputs=input_image_stream, 
            outputs=[rec_render, mesh_with_colour],
            name='ImageStream'
        )
        
        ## multi gpu support
        if n_gpu > 1:
            render_model = multi_gpu_model(render_model, gpus=n_gpu)
        
        ## compile render stream
        render_model.compile(
            optimizer=dm.optimizers.Adam(lr=LR),
            loss=['mae', dm.losses.dummy]
        )

        return render_model, mesh_ae_model, img_encoder_model

    def train_op(models, data, i_epoch, i_batch, epoch_end, training_history=None, **kwargs):
        sess = dm.K.get_session()
        image_stream, mesh_stream, img_encoder  = models
        [train_mesh, train_image], _ = dm.engine.training.generator_adapter(data)

        # ----------------------
        #  Train Mesh Stream
        # ----------------------
        loss_mesh = mesh_stream.train_on_batch([train_mesh], [train_mesh])

        # ------------------
        #  Train Render Stream
        # ------------------
        loss_img = image_stream.train_on_batch([train_image], [train_image, train_mesh])

        logs = dm.utils.Summary(
            {
                "losses/loss_mesh": loss_mesh,
                "losses/loss_img": loss_img[0],
                "learning_rate/mesh": mesh_stream.optimizer.lr.eval(sess),
                "learning_rate/img": image_stream.optimizer.lr.eval(sess),
            }
        )

        if epoch_end:
            ae_mesh = mesh_stream.predict(train_mesh)
            rec_imgs, rec_mesh = image_stream.predict(train_image)
            _, cam_params = img_encoder.predict(train_image)
            logs.update_images({
                'image/input': train_image,
                'image/render': rec_imgs,
                'image/mesh': dm.utils.mesh.render_meshes(rec_mesh[:4], trilist, res=INPUT_SHAPE),
                'mesh/input': dm.utils.mesh.render_meshes(train_mesh[:4], trilist, res=INPUT_SHAPE),
                'mesh/ae': dm.utils.mesh.render_meshes(ae_mesh[:4], trilist, res=INPUT_SHAPE),
            })

            logs.update_scalars(
                {'cam_params/{}'.format(idx_p): p for idx_p, p in enumerate(cam_params[0])}
            )

        return logs

    # prepare data
    train_generator = dm.data.generator.MergeGenerators(*build_data())
    train_queue = enqueue_generator(
        train_generator, workers=FLAGS.no_thread)
    
    # prepare model
    image_stream, mesh_stream, img_encoder = build_model()

    mesh_lr_decay = dm.callbacks.LearningRateScheduler(
        schedule=lambda epoch: LR * FLAGS.lr_decay ** epoch)
    mesh_lr_decay.set_model(mesh_stream)

    image_lr_decay = dm.callbacks.LearningRateScheduler(
        schedule=lambda epoch: LR * FLAGS.lr_decay ** epoch)
    image_lr_decay.set_model(image_stream)

    # training
    history = dm.engine.training.train_monitor(
        [image_stream, mesh_stream, img_encoder],
        train_queue, 
        train_op,
        epochs=200, step_per_epoch=len(train_generator),
        callbacks=[
            train_generator,
            mesh_lr_decay, image_lr_decay
        ],
        verbose=FLAGS.verbose,
        logdir=LOGDIR,
    )
Code Example #20
File: basel.py  Project: redstorm-fyy/lsfm
def save_customize_template_from_basel(path):
    import menpo3d.io as m3io
    import lsfm
    template = m3io.import_mesh(path)
    lsfm.landmark.landmark_template(template, verbose=True)
    save_template(template, overwrite=True)
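
A usage sketch (the Basel template path is illustrative):

# Usage sketch (illustrative path)
save_customize_template_from_basel('/data/basel/basel_mean_face.obj')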