def read_ply(i):
    """Read vertex positions from the i-th PLY file as a numpy array."""
    #filename = 'data-armadillo/verts%d.ply' % (i + 1)
    filename = base_path + '%d.ply' % i
    if i % 13 == 0:
        print('.', end='', flush=True)  # lightweight progress indicator
    verts = igl.eigen.MatrixXd()
    # Only the vertices are kept; faces, normals and UVs land in dummy buffers
    igl.readPLY(filename, verts, _i, _d, _d)
    return e2p(verts)
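
Example #1 relies on module-level context that the snippet does not show; a minimal sketch of those assumptions (the names `base_path`, `_i`, and `_d` are taken from the call sites above, the path is hypothetical):

import pyigl as igl
from iglhelpers import e2p

base_path = 'data-armadillo/'  # hypothetical; read_ply/read_dmat append their own file patterns
_i = igl.eigen.MatrixXi()      # dummy face buffer; readPLY's faces are discarded
_d = igl.eigen.MatrixXd()      # dummy buffer for normals and UVs, also discarded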
Example #2
def read_dmat(i):
    """Read the i-th displacement DMAT file as a numpy array."""
    filename = base_path + 'displacements_%d.dmat' % i
    if i % 13 == 0:
        print('.', end='', flush=True)  # lightweight progress indicator
    verts = igl.eigen.MatrixXd()
    igl.readDMAT(filename, verts)  # readDMAT takes just (filename, matrix)
    return e2p(verts)
Example #3
    def __init__(self,
                 mesh,
                 ratio=0.0,
                 std=0.0,
                 verticeSampling=False,
                 importanceSampling=False):
        self._V = iglhelpers.e2p(mesh.V())
        self._F = iglhelpers.e2p(mesh.F())
        self._sampleVertices = verticeSampling

        if ratio < 0 or ratio > 1:
            raise ValueError("Ratio must be in [0, 1]")

        self._ratio = ratio

        if std < 0 or std > 1:
            raise ValueError("Normal deviation must be in [0, 1]")

        self._std = std

        self._calculateFaceBins()
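
A hypothetical construction call for Example #3, assuming the class is named `Sampler` (the real name is not shown) and `mesh` exposes the eigen `V()`/`F()` accessors used above:

sampler = Sampler(mesh, ratio=0.1, std=0.05)  # both ratio and std must lie in [0, 1]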
Example #4
def reconstruct_npz(inname, outname):
    """
    Reconstruct a 3D shape by deforming a template.
    :param inname: input path
    :param outname: output path
    :return: None (the reconstruction is saved to outname)
    """
    if os.path.exists(outname):
        return
    with np.load(inname) as npl:
        V, F = npl['V'], npl['F']
        V = pca_whiten(V)
        max_axis = np.argmax(np.max(V, axis=0) - np.min(V, axis=0))
        V = V[:, np.roll(np.arange(3), 1 - max_axis)]  # cycle axes so the longest extent becomes Y (index 1)
        V *= 1.7
    assert (np.max(V, axis=0) - np.min(V, axis=0))[1] > 1.69  # sanity check: Y extent now exceeds 1.69
    while V.shape[0] < 1e4:  # upsample until the mesh has at least 10,000 vertices
        eV, eF = p2e(V), p2e(F)
        NV, NF = igl.eigen.MatrixXd(), igl.eigen.MatrixXi()
        igl.upsample(eV, eF, NV, NF)
        V, F = e2p(NV), e2p(NF)

    input = trimesh.Trimesh(vertices=V, faces=F, process=False)
    scalefactor = 1.0
    if global_variables.opt.scale:
        input, scalefactor = scale(
            input, global_variables.mesh_ref_LR
        )  # scale input to have the same volume as mesh_ref_LR
    if global_variables.opt.clean:
        input = clean(input)  # remove points that don't belong to any edge
    test_orientation(input)

    inp_V = input.vertices
    if inp_V.shape[0] > 1e5:  # cap the optimizer input at 100,000 points
        inp_V = inp_V[
            np.random.choice(inp_V.shape[0], int(1e5), replace=False), :]
    final_points, final_loss = run(inp_V, scalefactor)

    npz_path = os.path.dirname(outname)
    os.makedirs(npz_path, exist_ok=True)
    np.savez(outname, V=final_points, l=final_loss)
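
A hypothetical driver loop for Example #4, assuming the input shapes sit as .npz files under `in_dir` (both directory names are placeholders):

import glob
import os

in_dir, out_dir = 'shapes_in', 'shapes_out'  # hypothetical directories
for inname in glob.glob(os.path.join(in_dir, '*.npz')):
    outname = os.path.join(out_dir, os.path.basename(inname))
    reconstruct_npz(inname, outname)  # skips outputs that already exist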
Example #5
def load_obj_verts(dir):
    """Load the vertex array of every OBJ in `dir`, sorted by filename."""
    Vs = []
    filenames = sorted(os.listdir(dir))

    for filename in filenames:
        path = os.path.join(dir, filename)
        V = igl.eigen.MatrixXd()
        F = igl.eigen.MatrixXi()
        igl.readOBJ(path, V, F)

        Vs.append(e2p(V))

    # Stacking assumes every mesh has the same vertex count
    return numpy.array(Vs)
Example #6
    def _normalizeMesh(self):
        # Fit the minimum bounding sphere and derive the uniform scale that
        # maps it to a sphere of radius BOUNDING_SPHERE_RADIUS.
        mb = miniball.Miniball(iglhelpers.e2p(self._V))
        scale = BOUNDING_SPHERE_RADIUS / math.sqrt(mb.squared_radius())

        # Translation that moves the (scaled) miniball center to the origin
        T = igl.eigen.Affine3d()
        T.setIdentity()
        T.translate(
            igl.eigen.MatrixXd([
                -mb.center()[0] * scale, -mb.center()[1] * scale,
                -mb.center()[2] * scale
            ]))
        print("[INFO] scaled by", scale)
        Vscale = T.matrix().block(0, 0, 3, 3).transpose()  # identity rotation block
        Vtrans = igl.eigen.MatrixXd(self._V.rows(), self._V.cols())
        Vtrans.rowwiseSet(T.matrix().block(0, 3, 3, 1).transpose())  # broadcast the translation to every row

        # V <- (V - center) * scale, expressed with eigen block operations
        self._V = (self._V * Vscale) * scale + Vtrans
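
The eigen arithmetic in Example #6 amounts to centering on the miniball and scaling uniformly; a pure-numpy sketch of the same normalization, assuming `V` is an (n, 3) array:

import math
import numpy as np
import miniball

def normalize_to_sphere(V, radius=1.0):
    # Fit the minimum bounding sphere, then map it to radius `radius`
    mb = miniball.Miniball(V)
    scale = radius / math.sqrt(mb.squared_radius())
    return (V - np.asarray(mb.center())) * scale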
Example #7
    def query(self, queries):
        """Returns numpy array of SDF values for each point in queries"""
        queryV = iglhelpers.p2e(queries)

        S = igl.eigen.MatrixXd()
        B = igl.eigen.MatrixXd()
        I = igl.eigen.MatrixXi()
        C = igl.eigen.MatrixXd()
        N = igl.eigen.MatrixXd()

        if self._precomputed and self._signType == igl.SIGNED_DISTANCE_TYPE_FAST_WINDING_NUMBER:
            # Reuse the precomputed AABB tree and fast-winding-number BVH
            print("[INFO] Generating SDFs")
            igl.signed_distance_fast_winding_number(queryV, self._V, self._F,
                                                    self._tree, self._fwn_bvh,
                                                    S)
            print("[INFO] SDFs done")
        else:
            igl.signed_distance(queryV, self._V, self._F, self._signType, S, I,
                                C, N)

        return iglhelpers.e2p(S)
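
A hypothetical query call for Example #7, assuming `sdf` is an instance of the class above with its mesh already loaded:

import numpy as np

pts = np.random.uniform(-1.0, 1.0, (1000, 3))  # sample points in a unit box
values = sdf.query(pts)  # signed distances, conventionally negative inside the surface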
Example #8
class Geom:
    def __init__(self, s=None):
        self.V = igl.eigen.MatrixXd()
        self.F = igl.eigen.MatrixXi()
        if s is not None:
            igl.read_triangle_mesh(s, self.V, self.F)


# Picking Largest Component
from scipy import stats
for path in scans:
    Inmodel = Geom(path)
    C = igl.eigen.MatrixXi()
    igl.facet_components(Inmodel.F, C)

    C = e2p(C).flatten()
    modeC, mode_count = stats.mode(C)  # most frequent component label
    modeC, mode_count = modeC[0], mode_count[0]  # stats.mode returns length-1 arrays

    if mode_count == Inmodel.F.rows():  # mesh is already a single component
        print(f"Already Fit  {path[-15:-12]}")
        igl.write_triangle_mesh(path[:-12] + '_single.obj', Inmodel.V,
                                Inmodel.F)
    else:
        Fid = p2e(np.where(C == modeC)[0])  # facet indices in the dominant component
        F = igl.slice(Inmodel.F, Fid, 1)    # keep only those faces
        Outmodel = Geom()
        I, J = igl.eigen.MatrixXi(), igl.eigen.MatrixXi()
        igl.remove_unreferenced(Inmodel.V, F, Outmodel.V, Outmodel.F, I, J)
        igl.write_triangle_mesh(path[:-12] + '_single.obj', Outmodel.V,
                                Outmodel.F)
        print(f'Written {path[-15:-12]}')
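
Since facet component labels are small non-negative integers, np.bincount is a lighter alternative to scipy.stats.mode for finding the dominant component; a sketch, assuming `C` is the flattened label array from above:

counts = np.bincount(C)
modeC = int(np.argmax(counts))    # dominant component label
mode_count = int(counts[modeC])   # number of facets carrying it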
Example #9
def setup_deformation_transfer(source, target, use_normals=False):
    # These preallocations are only consumed by the commented-out
    # AABB-tree fallback below; the libigl path reassigns them.
    rows = np.zeros(3 * target.v.shape[0])
    cols = np.zeros(3 * target.v.shape[0])
    coeffs_v = np.zeros(3 * target.v.shape[0])
    coeffs_n = np.zeros(3 * target.v.shape[0])

    print("Computing nearest vertices")

    P = p2e(target.v)
    V = p2e(source.v)
    F = p2e(source.f)

    sqrD = igl.eigen.MatrixXd()
    nearest_faces = igl.eigen.MatrixXi()
    nearest_vertices = igl.eigen.MatrixXd()
    igl.point_mesh_squared_distance(P, V, F, sqrD, nearest_faces,
                                    nearest_vertices)

    print("Computing barycentric coordinates")

    coeffs_v = igl.eigen.MatrixXd()
    Va, Vb, Vc = (igl.eigen.MatrixXd(), igl.eigen.MatrixXd(),
                  igl.eigen.MatrixXd())
    F2 = igl.eigen.MatrixXi()
    xyz = p2e(np.array([0, 1, 2]))  # selects all three columns in igl.slice

    igl.slice(F, nearest_faces, xyz, F2)  # faces hit by the projections
    igl.slice(V, F2.col(0), xyz, Va)      # first corner of each face
    igl.slice(V, F2.col(1), xyz, Vb)      # second corner
    igl.slice(V, F2.col(2), xyz, Vc)      # third corner

    igl.barycentric_coordinates(nearest_vertices, Va, Vb, Vc, coeffs_v)

    nearest_faces = e2p(nearest_faces)
    coeffs_v = e2p(coeffs_v).ravel()

    rows = np.repeat(np.arange(target.v.shape[0]), 3)  # three coefficients per target vertex
    cols = source.f[nearest_faces].ravel()             # the three source-vertex ids per face
    """
    nearest_faces, nearest_parts, nearest_vertices = source.compute_aabb_tree().nearest(target.v, True)
    nearest_faces = nearest_faces.ravel().astype(np.int64)
    nearest_parts = nearest_parts.ravel().astype(np.int64)
    nearest_vertices = nearest_vertices.ravel()

    for i in range(target.v.shape[0]):
        # Closest triangle index
        f_id = nearest_faces[i]
        # Closest triangle vertex ids
        nearest_f = source.f[f_id]

        # Closest surface point
        nearest_v = nearest_vertices[3 * i:3 * i + 3]
        # Distance vector to the closest surface point
        dist_vec = target.v[i] - nearest_v

        rows[3 * i:3 * i + 3] = i * np.ones(3)
        cols[3 * i:3 * i + 3] = nearest_f

        n_id = nearest_parts[i]
        if n_id == 0:
            # Closest surface point in triangle
            A = np.vstack((source.v[nearest_f])).T
            coeffs_v[3 * i:3 * i + 3] = np.linalg.lstsq(A, nearest_v)[0]
        elif n_id > 0 and n_id <= 3:
            # Closest surface point on edge
            A = np.vstack((source.v[nearest_f[n_id - 1]], source.v[nearest_f[n_id % 3]])).T
            tmp_coeffs = np.linalg.lstsq(A, target.v[i])[0]
            coeffs_v[3 * i + n_id - 1] = tmp_coeffs[0]
            coeffs_v[3 * i + n_id % 3] = tmp_coeffs[1]
        else:
            # Closest surface point a vertex
            coeffs_v[3 * i + n_id - 4] = 1.0
        """

    #    if use_normals:
    #        A = np.vstack((vn[nearest_f])).T
    #        coeffs_n[3 * i:3 * i + 3] = np.linalg.lstsq(A, dist_vec)[0]

    #coeffs = np.hstack((coeffs_v, coeffs_n))
    #rows = np.hstack((rows, rows))
    #cols = np.hstack((cols, source.v.shape[0] + cols))
    matrix = sp.csc_matrix((coeffs_v, (rows, cols)),
                           shape=(target.v.shape[0], source.v.shape[0]))
    return matrix
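
A hypothetical use of Example #9's return value: the (|target.v| x |source.v|) sparse matrix transfers any per-source-vertex signal to the target by barycentric interpolation (`deformed_source_vertices` is a placeholder array):

M = setup_deformation_transfer(source, target)
target_positions = M.dot(deformed_source_vertices)  # shape (|target.v|, 3)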
Example #10
import pymmgs
import numpy as np
import sys, os
sys.path.insert(0, os.path.expanduser('~/Workspace/libigl/python'))
import pyigl as igl
import pyigl.eigen as Eigen
from iglhelpers import p2e, e2p

V = igl.eigen.MatrixXd()
F = igl.eigen.MatrixXi()
igl.read_triangle_mesh('/Users/zhongshi/1399_standing_clap_000010.obj', V, F)

# SV, SVI, SVJ, SF =  igl.eigen.MatrixXd(),  igl.eigen.MatrixXi(), igl.eigen.MatrixXi(), igl.eigen.MatrixXi()
# igl.remove_duplicate_vertices(V,F,1e-10, SV, SVI, SVJ, SF)
M = igl.eigen.MatrixXd()
igl.doublearea(V, F, M)  # twice the area of each face
M = e2p(M).flatten()

# F0 = e2p(F)[np.where(M > 1e-9)[0],:]

# npl = np.load('/Users/zhongshi/1006_jump_from_wall_000003.obj.npz')
V0, F0 = e2p(V), e2p(F)

# igl.writeOBJ('1399.obj',V, F)
V, F = pymmgs.MMGS(V0, F0, 0.005)  # remesh; the scalar appears to be an error tolerance
vw = igl.glfw.Viewer()
vw.data().set_mesh(p2e(V), p2e(F))
vw.launch()
Example #11
def main():
    global verts_sample

    initial_verts, initial_faces = get_initial_verts_and_faces()
    numpy_base_verts = e2p(initial_verts).flatten()

    start = time.time()
    num_samples = 250
    numpy_verts_sample = load_samples(num_samples)
    numpy_displacements_sample = numpy_verts_sample - e2p(initial_verts)

    num_verts = len(numpy_verts_sample[0])
    print(num_verts)

    print('Loading...')
    verts_sample = [p2e(m) for m in numpy_verts_sample]
    displacements_sample = [p2e(m) for m in numpy_displacements_sample]
    print("Took:", time.time() - start)

    use_pca = False
    if use_pca:
        ### PCA Version
        print("Doing PCA...")
        train_size = num_samples
        test_size = num_samples
        test_data = numpy_displacements_sample[:test_size] * 1.0  # * 1.0 copies before the in-place shuffle
        test_data_eigen = verts_sample[:test_size]
        numpy.random.shuffle(numpy_displacements_sample)
        # train_data = numpy_verts_sample[test_size:test_size+train_size]
        train_data = numpy_displacements_sample[0:train_size]

        pca = PCA(n_components=3)
        pca.fit(train_data.reshape((train_size, 3 * num_verts)))

        # def encode(q):
        #     return pca.transform(numpy.array([q.flatten() - numpy_base_verts]))[0]

        # def decode(z):
        #     return (numpy_base_verts + pca.inverse_transform(numpy.array([z]))[0]).reshape((num_verts, 3))

        # print(numpy.equal(test_data[0].flatten().reshape((len(test_data[0]),3)), test_data[0]))
        # print(encode(test_data[0]))

        test_data_encoded = pca.transform(
            test_data.reshape(test_size, 3 * num_verts))
        test_data_decoded = (numpy_base_verts +
                             pca.inverse_transform(test_data_encoded)).reshape(
                                 test_size, num_verts, 3)
        test_data_decoded_eigen = [p2e(m) for m in test_data_decoded]
        ### End of PCA version
    else:
        ### Autoencoder
        import keras
        from keras.layers import Input, Dense
        from keras.models import Model, load_model
        import datetime

        start_time = time.time()

        train_size = num_samples
        test_size = num_samples
        test_data = numpy_displacements_sample[:test_size].reshape(
            test_size, 3 * num_verts)
        test_data_eigen = verts_sample[:test_size]
        # numpy.random.shuffle(numpy_displacements_sample)
        # train_data = numpy_verts_sample[test_size:test_size+train_size]
        train_data = numpy_displacements_sample[0:train_size].reshape(
            (train_size, 3 * num_verts))

        mean = numpy.mean(train_data)  # scalar statistics over all entries
        std = numpy.std(train_data)

        s_min = numpy.min(train_data)
        s_max = numpy.max(train_data)

        def normalize(data):
            return numpy.nan_to_num((data - mean) / std)
            # alternative min-max scaling:
            # return numpy.nan_to_num((data - s_min) / (s_max - s_min))

        def denormalize(data):
            return data * std + mean
            # return data * (s_max - s_min) + s_min

        train_data = normalize(train_data)
        test_data = normalize(test_data)

        # print(train_data)
        # print(mean)
        # print(std)
        # exit()
        # this is the size of our encoded representations
        encoded_dim = 3

        # Single autoencoder
        # initializer = keras.initializers.RandomUniform(minval=0.0, maxval=0.01, seed=5)
        # bias_initializer = initializer
        activation = keras.layers.advanced_activations.LeakyReLU(
            alpha=0.3)  # used in place of 'relu'

        input = Input(shape=(len(train_data[0]), ))
        output = Dense(30, activation=activation)(input)
        output = Dense(512, activation=activation)(output)
        output = Dense(64, activation=activation)(output)
        output = Dense(encoded_dim, activation=activation,
                       name="encoded")(output)
        output = Dense(64, activation=activation)(output)
        output = Dense(512, activation=activation)(output)
        output = Dense(30, activation=activation)(output)
        output = Dense(len(train_data[0]), activation='linear')(output)
        # A first test indicated no change in the output with a linear final activation.

        autoencoder = Model(input, output)

        optimizer = keras.optimizers.Adam(lr=0.001,
                                          beta_1=0.9,
                                          beta_2=0.999,
                                          epsilon=1e-08,
                                          decay=0)
        autoencoder.compile(optimizer=optimizer, loss='mean_squared_error')

        model_start_time = time.time()
        autoencoder.fit(train_data,
                        train_data,
                        epochs=1000,
                        batch_size=num_samples,
                        shuffle=True,
                        validation_data=(test_data, test_data))

        # output_path = 'trained_models/' + datetime.datetime.now().strftime("%I %M%p %B %d %Y") + '.h5'
        # autoencoder.save(output_path)

        print("Total model time: ", time.time() - model_start_time)

        # Display

        decoded_samples = denormalize(autoencoder.predict(test_data))
        #decoded_samples = autoencoder.predict(test_data) * std + mean

        test_data_decoded = (numpy_base_verts + decoded_samples).reshape(
            test_size, num_verts, 3)
        test_data_decoded_eigen = [p2e(m) for m in test_data_decoded]
        ### End of Autoencoder

    # Per-vertex squared reconstruction error, used to colour the mesh
    error = numpy.sum((test_data_decoded - numpy_verts_sample)**2, axis=2)
    colours = [igl.eigen.MatrixXd() for _ in range(num_samples)]
    for i in range(num_samples):
        igl.jet(p2e(error[i]), True, colours[i])

    # Set up
    viewer = igl.viewer.Viewer()

    viewer.data.set_mesh(initial_verts, initial_faces)

    def pre_draw(viewer):
        global current_frame, verts_sample, show_decoded

        if viewer.core.is_animating:
            print(current_frame)
            print(show_decoded)
            if show_decoded:
                viewer.data.set_vertices(
                    test_data_decoded_eigen[current_frame])
                viewer.data.set_colors(colours[current_frame])
            else:
                viewer.data.set_vertices(test_data_eigen[current_frame])

            viewer.data.compute_normals()
            current_frame = (current_frame + 1) % test_size

        return False

    viewer.callback_pre_draw = pre_draw
    viewer.callback_key_down = key_down  # defined outside this excerpt; see the sketch below
    viewer.core.is_animating = False
    # viewer.core.camera_zoom = 2.5
    viewer.core.animation_max_fps = 30.0

    viewer.launch()
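
main() wires up a key_down callback that is not part of this excerpt; a minimal sketch of what it might contain (the key bindings are assumptions):

def key_down(viewer, key, modifier):
    global show_decoded
    if key == ord(' '):  # space toggles playback
        viewer.core.is_animating = not viewer.core.is_animating
    elif key == ord('D'):  # 'd' toggles decoded vs. ground-truth frames
        show_decoded = not show_decoded
    return False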