def __init__(self, in_mesh, out_working_dir, num_bones):

        self.V, self.F_, self.N_ = self.read_mesh_file(in_mesh)

        self.B_ = num_bones
        self.W = np.zeros((self.N_, self.B_))  # skinning weights, one row per vertex

        self.iter_times = 100
        self.out_working_dir = out_working_dir
        self.K_ = 4

        self.continue_iterate = True
        self.pre_e_rms = -1

        solvers.options['show_progress'] = False
        solvers.options['maxiters'] = 200
        solvers.options['feastol'] = 1e-9
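        # these keys match cvxopt's solvers.options, so `solvers` is presumably
        # cvxopt.solvers (an assumption; the import is not shown in this snippet)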

        # parameters used in the transformation update
        self.theta = 3.0

        with h5py.File(in_mesh, 'r') as f:
            # used for computing geodesic distances on the first frame of the animation
            self.compute_distance = GeodesicDistanceComputation(f['verts'][:].astype(np.float64)[0], f['tris'][:])

        #rotation matrix and translation matrix
        init_solver = SkinningInitialization(in_mesh, self.B_, 5)
        self.R, self.T = init_solver.compute()
        assert self.R.shape == (self.F_, self.B_, 3, 3)
        assert self.T.shape == (self.F_, self.B_, 3)
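
# A minimal sketch of the read_mesh_file helper this class assumes (hypothetical;
# the real implementation is not shown). The HDF5 layout matches the reads
# elsewhere in this file: an animated (frames, vertices, 3) 'verts' dataset.
def read_mesh_file(self, in_mesh):
    with h5py.File(in_mesh, 'r') as f:
        V = f['verts'][:].astype(np.float64)  # animated vertices, shape (F, N, 3)
    F_, N_ = V.shape[0], V.shape[1]           # frame count and vertex count
    return V, F_, N_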
Example #2
def main(input_animation_file, output_sploc_file):
    with h5py.File(input_animation_file, 'r') as f:
        # the geodesic computation needs a single rest-pose frame, so take the
        # first frame, as the other examples in this file do
        verts = f['verts'][:].astype(np.float64)[0]
        tris = f['tris'][:]
    N, _ = verts.shape
    compute_distance = GeodesicDistanceComputation(verts, tris)

    with h5py.File(output_sploc_file, 'w') as f:
        f['Gnum'] = N
        for i in range(0, N):
            f['geodis%05d' % i] = compute_distance(i)
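
# Minimal usage sketch (the file names are hypothetical): precompute the
# per-vertex geodesic distance table, then read one row back.
if __name__ == '__main__':
    main('anim.h5', 'geodist.h5')
    with h5py.File('geodist.h5', 'r') as f:
        d0 = f['geodis%05d' % 0][:]  # distances from vertex 0 to every vertex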
Example #3
    def __init__(self, in_mesh, bones_num, iter_times=5):
        self.B_ = bones_num
        self.read_mesh_file(in_mesh)

        #each cluster is represented as a sequence of |t| bone transformations
        self.clust_rot = np.empty((self.F_, self.B_, 3, 3))   # rotation feature
        self.clust_transl = np.empty((self.F_, self.B_, 3))   # translation feature

        self.clust_patch = {}

        self.iter_times = iter_times

        with h5py.File(in_mesh, 'r') as f:
            # used for computing geodesic distances on the first frame of the animation
            self.compute_distance = GeodesicDistanceComputation(f['verts'][:].astype(np.float64)[0], f['tris'][:])
        self.neighbour_num = 20
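        # note (inferred from Example #2 above): self.compute_distance is callable
        # per source vertex -- self.compute_distance(i) returns the geodesic
        # distance from vertex i to every other vertex on the rest-pose mesh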
Example #4
def main(input_animation_file, output_sploc_file, output_animation_file):
    rest_shape = "first" # which frame to use as rest-shape ("first" or "average")
    K = 50 # number of components
    smooth_min_dist = 0.1 # minimum geodesic distance for support map, d_min in the paper
    smooth_max_dist = 0.7 # maximum geodesic distance for support map, d_max in the paper
    num_iters_max = 10 # number of iterations to run
    sparsity_lambda = 2. # sparsity parameter, lambda in the paper

    rho = 10.0 # penalty parameter for ADMM
    num_admm_iterations = 10 # number of ADMM iterations

    # preprocessing (external script):
    #   - rigidly align the sequence
    #   - normalize into the -0.5 ... 0.5 box

    with h5py.File(input_animation_file, 'r') as f:
        verts = f['verts'][:].astype(np.float64)
        tris = f['tris'][:]

    F, N, _ = verts.shape

    if rest_shape == "first":
        Xmean = verts[0]
    elif rest_shape == "average":
        Xmean = np.mean(verts, axis=0)

    # prepare geodesic distance computation on the restpose mesh
    compute_geodesic_distance = GeodesicDistanceComputation(Xmean, tris)

    # form animation matrix, subtract mean and normalize
    # notice that in contrast to the paper, X is an array of shape (F, N, 3) here
    X = verts - Xmean[np.newaxis]
    pre_scale_factor = 1 / np.std(X)
    X *= pre_scale_factor
    R = X.copy() # residual

    # find initial components explaining the residual
    C = []
    W = []
    for k in range(K):
        # find the vertex explaining the most variance across the residual animation
        magnitude = (R**2).sum(axis=2)
        idx = np.argmax(magnitude.sum(axis=0))
        # find linear component explaining the motion of this vertex
        U, s, Vt = svd(R[:,idx,:].reshape(R.shape[0], -1).T, full_matrices=False)
        wk = s[0] * Vt[0,:] # weights
        # choose the sign of the weight vector according to which projection onto
        # the constraint set retains more energy; this avoids problems with
        # negative weights under the non-negativity constraint
        wk_proj = project_weight(wk)
        wk_proj_negative = project_weight(-wk)
        wk = wk_proj \
                if norm(wk_proj) > norm(wk_proj_negative) \
                else wk_proj_negative
        s = 1 - compute_support_map(idx, compute_geodesic_distance, smooth_min_dist, smooth_max_dist)
        # solve for optimal component inside support map
        ck = (np.tensordot(wk, R, (0, 0)) * s[:,np.newaxis])\
                / np.inner(wk, wk)
        C.append(ck)
        W.append(wk)
        # update residual
        R -= np.outer(wk, ck).reshape(R.shape)
    C = np.array(C)
    W = np.array(W).T

    # prepare auxiliary variables
    Lambda = np.empty((K, N))
    U = np.zeros((K, N, 3))

    # main global optimization
    for it in range(num_iters_max):
        # update weights
        Rflat = R.reshape(F, N*3) # flattened residual
        for k in range(C.shape[0]): # for each component
            Ck = C[k].ravel()
            Ck_norm = np.inner(Ck, Ck)
            if Ck_norm <= 1.e-8:
                # the component seems to be zero everywhere, so set its activation to 0 as well
                W[:,k] = 0
                continue # prevent divide by zero
            # block coordinate descent update
            Rflat += np.outer(W[:,k], Ck)
            opt = np.dot(Rflat, Ck) / Ck_norm
            W[:,k] = project_weight(opt)
            Rflat -= np.outer(W[:,k], Ck)
        # update spatially varying regularization strength
        for k in range(K):
            ck = C[k]
            # find vertex with biggest displacement in component and compute support map around it
            idx = (ck**2).sum(axis=1).argmax()
            support_map = compute_support_map(idx, compute_geodesic_distance, 
                                              smooth_min_dist, smooth_max_dist)
            # update L1 regularization strength according to this support map
            Lambda[k] = sparsity_lambda * support_map
        # update components
        Z = C.copy() # dual variable
        # prefactor linear solve in ADMM
        G = np.dot(W.T, W)
        c = np.dot(W.T, X.reshape(X.shape[0], -1))
        solve_prefactored = cho_factor(G + rho * np.eye(G.shape[0]))
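        # the C-update below solves the normal equations
        #   (W^T W + rho I) C = W^T X + rho (Z - U)
        # for all N*3 right-hand-side columns, reusing this one Cholesky factorization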
        # ADMM iterations
        for admm_it in range(num_admm_iterations):
            C = cho_solve(solve_prefactored, c + rho * (Z - U).reshape(c.shape)).reshape(C.shape)
            Z = prox_l1l2(Lambda, C + U, 1. / rho)
            U = U + C - Z
        # set updated components to dual Z, 
        # this was also suggested in [Boyd et al.] for optimization of sparsity-inducing norms
        C = Z
        # evaluate objective function
        R = X - np.tensordot(W, C, (1, 0)) # residual
        sparsity = np.sum(Lambda * np.sqrt((C**2).sum(axis=2)))
        e = (R**2).sum() + sparsity
        # TODO convergence check
        print "iteration %03d, E=%f" % (it, e)

    # undo scaling
    C /= pre_scale_factor

    # save components
    with h5py.File(output_sploc_file, 'w') as f:
        f['default'] = Xmean
        f['tris'] = tris
        for i, c in enumerate(C):
            f['comp%03d' % i] = c + Xmean

    # save encoded animation including the weights
    if output_animation_file:
        with h5py.File(output_animation_file, 'w') as f:
            f['verts'] = np.tensordot(W, C, (1, 0)) + Xmean[np.newaxis]
            f['tris'] = tris
            f['weights'] = W
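
# Hedged sketches of the three helpers this example assumes (their real
# implementations are not shown; these are consistent with how they are
# used above, but only illustrative).

def project_weight(x):
    # project onto the weight constraint set implied above: clamp to be
    # non-negative and normalize so the largest activation is 1
    x = np.maximum(0., x)
    max_x = x.max()
    return x if max_x == 0 else x / max_x

def compute_support_map(idx, geodesics, min_dist, max_dist):
    # 0 within min_dist of vertex idx, 1 beyond max_dist, linear ramp between;
    # used both to localize components (via 1 - support) and to scale Lambda
    phi = geodesics(idx)
    return (np.clip(phi, min_dist, max_dist) - min_dist) / (max_dist - min_dist)

def prox_l1l2(Lambda, x, beta):
    # proximal operator of beta * sum_{k,n} Lambda[k,n] * ||x[k,n,:]||_2,
    # i.e. per-vertex group shrinkage of the 3-vectors in x
    xlen = np.sqrt((x**2).sum(axis=-1))
    with np.errstate(divide='ignore', invalid='ignore'):
        shrinkage = np.maximum(0.0, 1.0 - beta * Lambda / xlen)
    shrinkage[xlen == 0] = 0.0
    return x * shrinkage[..., np.newaxis]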
Example #5
def main(input_skin_file, input_animation_file, output_sploc_file):

    K = 10 # number of components
    smooth_min_dist = 0.1 # minimum geodesic distance for support map, d_min in the paper
    smooth_max_dist = 0.7 # maximum geodesic distance for support map, d_max in the paper
    num_iters_max = 10 # number of iterations to run
    sparsity_lambda = 2. # sparsity parameter, lambda in the paper

    rho = 10.0 # penalty parameter for ADMM
    num_admm_iterations = 10 # number of ADMM iterations

    with h5py.File(input_skin_file, 'r') as f:
        skin_verts = f['verts'][:].astype(np.float64)
        skin_tris = f['tris'][:]

    with h5py.File(input_animation_file, 'r') as f:
        verts = f['verts'][:].astype(np.float64)
        tris = f['tris'][:]

    F, N, _ = verts.shape

    # preprocessing: build a per-vertex local (world-to-model) frame for every frame
    skin_norms = np.array([cal_mesh_normals_api(skin_verts[f], skin_tris) for f in range(F)])
    #skin_norms2 = np.array([cal_mesh_normals(skin_verts[f], skin_tris) for f in range(F)])
    e = corresponding_edge(N, skin_tris)
    w2m_mat = np.array([[w2m_transf_mat(skin_norms[f][i], skin_verts[f][e[i]] - skin_verts[f][i], skin_verts[f][i])
                         for i in range(N)] for f in range(F)])

    compute_geodesic_distance = GeodesicDistanceComputation(verts[0], tris)

    X = verts - skin_verts
    X_ext = np.append(X, np.ones([F, N, 1], dtype=X.dtype), axis=2)  # homogeneous coordinate
    X_loc = np.empty(X_ext.shape, dtype=X.dtype)
    for i in range(F):
        for j in range(N):
            X_loc[i, j, :] = w2m_mat[i, j, :, :].dot(X_ext[i, j, :])
    X = np.delete(X_loc, 3, 2)  # drop the homogeneous coordinate
    pre_scale_factor = 1 / np.std(X)
    X *= pre_scale_factor
    R = X.copy() # residual

    # find initial components explaining the residual
    C = []
    W = []
    for k in range(K):
        # find the vertex explaining the most variance across the residual animation
        magnitude = (R**2).sum(axis=2)
        idx = np.argmax(magnitude.sum(axis=0))
        # find linear component explaining the motion of this vertex
        U, s, Vt = svd(R[:,idx,:].reshape(R.shape[0], -1).T, full_matrices=False)
        wk = s[0] * Vt[0,:] # weights
        # choose the sign of the weight vector according to which projection onto
        # the constraint set retains more energy; this avoids problems with
        # negative weights under the non-negativity constraint
        wk_proj = project_weight(wk)
        wk_proj_negative = project_weight(-wk)
        wk = wk_proj \
                if norm(wk_proj) > norm(wk_proj_negative) \
                else wk_proj_negative
        s = 1 - compute_support_map(idx, compute_geodesic_distance, smooth_min_dist, smooth_max_dist)
        # solve for optimal component inside support map
        ck = (np.tensordot(wk, R, (0, 0)) * s[:,np.newaxis])\
                / np.inner(wk, wk)
        C.append(ck)
        W.append(wk)
        # update residual
        R -= np.outer(wk, ck).reshape(R.shape)
    C = np.array(C)
    W = np.array(W).T

    # prepare auxiliary variables
    Lambda = np.empty((K, N))
    U = np.zeros((K, N, 3))

    # main global optimization
    for it in range(num_iters_max):
        # update weights
        Rflat = R.reshape(F, N*3) # flattened residual
        for k in range(C.shape[0]): # for each component
            Ck = C[k].ravel()
            Ck_norm = np.inner(Ck, Ck)
            if Ck_norm <= 1.e-8:
                # the component seems to be zero everywhere, so set its activation to 0 as well
                W[:,k] = 0
                continue # prevent divide by zero
            # block coordinate descent update
            Rflat += np.outer(W[:,k], Ck)
            opt = np.dot(Rflat, Ck) / Ck_norm
            W[:,k] = project_weight(opt)
            Rflat -= np.outer(W[:,k], Ck)
        # update spatially varying regularization strength
        for k in range(K):
            ck = C[k]
            # find vertex with biggest displacement in component and compute support map around it
            idx = (ck**2).sum(axis=1).argmax()
            support_map = compute_support_map(idx, compute_geodesic_distance,
                                              smooth_min_dist, smooth_max_dist)
            # update L1 regularization strength according to this support map
            Lambda[k] = sparsity_lambda * support_map
        # update components
        Z = C.copy() # dual variable
        # prefactor linear solve in ADMM
        G = np.dot(W.T, W)
        c = np.dot(W.T, X.reshape(X.shape[0], -1))
        solve_prefactored = cho_factor(G + rho * np.eye(G.shape[0]))
        # ADMM iterations
        for admm_it in range(num_admm_iterations):
            C = cho_solve(solve_prefactored, c + rho * (Z - U).reshape(c.shape)).reshape(C.shape)
            Z = prox_l1l2(Lambda, C + U, 1. / rho)
            U = U + C - Z
        # set updated components to dual Z,
        # this was also suggested in [Boyd et al.] for optimization of sparsity-inducing norms
        C = Z
        # evaluate objective function
        R = X - np.tensordot(W, C, (1, 0)) # residual
        sparsity = np.sum(Lambda * np.sqrt((C**2).sum(axis=2)))
        e = (R**2).sum() + sparsity
        # TODO convergence check
        print "iteration %03d, E=%f" % (it, e)

    # undo scaling
    C /= pre_scale_factor
    # transform the components from the per-vertex local frames back to world
    # space, writing the result back into C (the transform uses frame 0's matrices)
    for k, c in enumerate(C):
        c_ext = np.append(c, np.ones([N, 1], dtype=c.dtype), axis=1)
        c_world = np.empty(c_ext.shape, dtype=c.dtype)
        for i in range(N):
            m2w_mat_per_v = np.transpose(w2m_mat[0, i, :, :])
            c_world[i, :] = m2w_mat_per_v.dot(c_ext[i, :])
        C[k] = np.delete(c_world, 3, 1)
    Xmean = skin_verts[0]
    with h5py.File(output_sploc_file, 'w') as f:
        f['default'] = Xmean
        f['tris'] = tris
        for i, c in enumerate(C):
            f['comp%03d' % i] = c + Xmean
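
# Compared with Example #4, this variant learns components on the residual
# between the animation and a skinned proxy (X = verts - skin_verts), expressed
# in per-vertex local frames, and maps them back to world space before saving.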
Example #6
def main(input_animation_file, output_sploc_file, output_animation_file, mid, anderson_m, outiter, out_res, input_rho):
    rest_shape = "first" # which frame to use as rest-shape ("first" or "average")
    K = 50 # number of components
    smooth_min_dist = 0.1 # minimum geodesic distance for support map, d_min in the paper
    smooth_max_dist = 0.7 # maximum geodesic distance for support map, d_max in the paper
    num_iters_max = 1 # number of iterations to run
    sparsity_lambda = 2. # sparsity parameter, lambda in the paper
    input_mid = mid

    rho = input_rho # penalty parameter for ADMM
    num_admm_iterations = 3000 # number of ADMM iterations


    # preprocessing (external script):
    #   - rigidly align the sequence
    #   - normalize into the -0.5 ... 0.5 box

    with h5py.File(input_animation_file, 'r') as f:
        verts = f['verts'][:].astype(np.float64)
        tris = f['tris'][:]

    F, N, _ = verts.shape

    if rest_shape == "first":
        Xmean = verts[0]
    elif rest_shape == "average":
        Xmean = np.mean(verts, axis=0)

    # prepare geodesic distance computation on the restpose mesh
    compute_geodesic_distance = GeodesicDistanceComputation(Xmean, tris)

    # form animation matrix, subtract mean and normalize
    # notice that in contrast to the paper, X is an array of shape (F, N, 3) here
    X = verts - Xmean[np.newaxis]
    pre_scale_factor = 1 / np.std(X)
    X *= pre_scale_factor
    R = X.copy() # residual

    # find initial components explaining the residual
    C = []
    W = []
    for k in range(K):
        # find the vertex explaining the most variance across the residual animation
        magnitude = (R**2).sum(axis=2)
        idx = np.argmax(magnitude.sum(axis=0))
        # find linear component explaining the motion of this vertex
        U, s, Vt = la.svd(R[:,idx,:].reshape(R.shape[0], -1).T, full_matrices=False)
        wk = s[0] * Vt[0,:] # weights
        # choose the sign of the weight vector according to which projection onto
        # the constraint set retains more energy; this avoids problems with
        # negative weights under the non-negativity constraint
        wk_proj = project_weight(wk)
        wk_proj_negative = project_weight(-wk)
        wk = wk_proj \
                if norm(wk_proj) > norm(wk_proj_negative) \
                else wk_proj_negative
        s = 1 - compute_support_map(idx, compute_geodesic_distance, smooth_min_dist, smooth_max_dist)
        # solve for optimal component inside support map
        ck = (np.tensordot(wk, R, (0, 0)) * s[:,np.newaxis])\
                / np.inner(wk, wk)
        C.append(ck)
        W.append(wk)
        # update residual
        R -= np.outer(wk, ck).reshape(R.shape)
    C = np.array(C)
    W = np.array(W).T

    # prepare auxiliary variables
    Lambda = np.empty((K, N))
    U = np.zeros((K, N, 3))

    total_begintime = time.time()
    # main global optimization
    for it in range(num_iters_max):
        if it == outiter:
            mid = input_mid
        else:
            mid = 0

        # update weights
        Rflat = R.reshape(F, N*3) # flattened residual
        for k in range(C.shape[0]): # for each component
            Ck = C[k].ravel()
            Ck_norm = np.inner(Ck, Ck)
            if Ck_norm <= 1.e-8:
                # the component seems to be zero everywhere, so set its activation to 0 as well
                W[:,k] = 0
                continue # prevent divide by zero
            # block coordinate descent update
            Rflat += np.outer(W[:, k], Ck)
            opt = np.dot(Rflat, Ck) / Ck_norm
            W[:,k] = project_weight(opt)
            Rflat -= np.outer(W[:,k], Ck)
        # update spatially varying regularization strength
        for k in range(K):
            ck = C[k]
            # find vertex with biggest displacement in component and compute support map around it
            idx = (ck**2).sum(axis=1).argmax()
            support_map = compute_support_map(idx, compute_geodesic_distance, 
                                              smooth_min_dist, smooth_max_dist)
            # update L1 regularization strength according to this support map
            Lambda[k] = sparsity_lambda * support_map
        # update components
        Z = C.copy() # dual variable
        # prefactor linear solve in ADMM
        G = np.dot(W.T, W)
        c = np.dot(W.T, X.reshape(X.shape[0], -1))
        solve_prefactored = cho_factor(G + rho * np.eye(G.shape[0]))

        times = []
        residuals = []
        energys = []
        # ADMM iterations
        size1 = Z.flatten().shape[0]
        print("Z size = %d"%(size1))
        if mid == 0:
            ignore_t = 0
            total_t = 0
            for admm_it in range(num_admm_iterations):
                begin_time = time.time()
                Z_prev = Z.copy()
                C = cho_solve(solve_prefactored, c + rho * (Z - U).reshape(c.shape)).reshape(C.shape)
                Z = prox_l1l2(Lambda, C + U, 1. / rho)
                U = U + C - Z                

                # ignore_st = time.time()
                # # f(x) + g(z)
                # R = X - np.tensordot(W, C, (1, 0))  # residual
                # sparsity = np.sum(Lambda * np.sqrt((C ** 2).sum(axis=2)))
                # e = (R ** 2).sum() + sparsity
                # ignore_et = time.time()
                # ignore_t = ignore_t + ignore_et - ignore_st

                # energys.append(e)
                run_time = time.time()
                total_t = total_t + run_time - begin_time
                times.append(total_t)
                # times.append(run_time - begin_time - ignore_t)
                res = la.norm(C - Z)**2 + la.norm(Z - Z_prev)**2
                residuals.append(np.sqrt(rho*res/size1))
                if res < 1e-10:
                    break

        # AA-ADMM iterations        
        if mid == 1:
            # acc parameters        
            Z_default = Z.copy()
            U_default = U.copy()
            acc = anderson.Anderson(np.concatenate((Z.flatten(), U.flatten()), axis=0), anderson_m)
            reset = True
            r_prev = 1e10
            admm_it = 0
            ignore_t = 0
            reset_count = 0
            total_t = 0
            AA_t = 0
            while admm_it < num_admm_iterations:
                begin_time = time.time()
                Z_prev = Z.copy()
                C = cho_solve(solve_prefactored, c + rho * (Z - U).reshape(c.shape)).reshape(C.shape)
                Z = prox_l1l2(Lambda, C + U, 1. / rho)
                U = U + C - Z
                res = la.norm(C - Z)**2 + la.norm(Z-Z_prev)**2
                # res = np.square(la.norm(C-Z)) + np.square(la.norm(Z-Z_prev))

                # ignore_st = time.time()
                # # f(x) + g(z)
                # R = X - np.tensordot(W, C, (1, 0))  # residual
                # sparsity = np.sum(Lambda * np.sqrt((C ** 2).sum(axis=2)))
                # e = (R ** 2).sum() + sparsity
                # ignore_et = time.time()
                # ignore_t = ignore_t + ignore_et - ignore_st
                if reset or res < r_prev:
                    Z_default = Z.copy()
                    U_default = U.copy()
                    r_prev = res
                    reset = False
                    acc_ZU = acc.compute(np.concatenate((Z.flatten(), U.flatten()), axis=0))
                    Z = acc_ZU[:size1].reshape(Z.shape)
                    U = acc_ZU[size1:].reshape(U.shape)

                    admm_it = admm_it + 1
                else:
                    Z = Z_default.copy()
                    U = U_default.copy()
                    reset = True
                    reset_count = reset_count + 1
                    acc.replace(np.concatenate((Z.flatten(), U.flatten()), axis=0))
                    continue

                # energys.append(e)
                run_time = time.time()
                total_t = total_t + run_time - begin_time
                times.append(total_t)
                # times.append(run_time - begin_time - ignore_t)
                residuals.append(np.sqrt(rho*r_prev/size1))
                if res < 1e-10:
                    break
            print('AA ADMM reset number:%d, total time: %.6f'%(reset_count, total_t))
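        # safeguard for the Anderson acceleration above: the extrapolated (Z, U)
        # pair is kept only while the combined residual decreases; otherwise the
        # last accepted iterate is restored, the history is reset via acc.replace,
        # and a plain ADMM step is retried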

        ## AA-DR primal
        if mid == 2:
            # acc parameters
            s = Z + U
            s_default = s.copy()
            acc = anderson.Anderson(s.flatten(), anderson_m)
            reset = True
            r_prev = 1e10
            admm_it = 0
            ignore_t = 0
            reset_count = 0
            total_t = 0
            while admm_it < num_admm_iterations:
                begin_time = time.time()
                Z = prox_l1l2(Lambda, s, 1. / rho)
                C = cho_solve(solve_prefactored, c + rho * (2*Z - s).reshape(c.shape)).reshape(C.shape)
                s = s + C - Z
                res = la.norm(C - Z)**2
                # res = np.square(la.norm(C - Z))

                # ignore_st = time.time()
                run_time = time.time()
                total_t = total_t + run_time - begin_time

                Z_p = prox_l1l2(Lambda, s, 1. / rho)
                r_com = la.norm(C - Z_p)**2 + la.norm(Z_p - Z)**2
                # r_com = np.square(la.norm(C - Z_p)) + np.square(la.norm(Z_p - Z))
                # f(x) + g(z)
                # R = X - np.tensordot(W, C, (1, 0))  # residual
                # sparsity = np.sum(Lambda * np.sqrt((C ** 2).sum(axis=2)))
                # e = (R ** 2).sum() + sparsity
                # ignore_et = time.time()
                # ignore_t = ignore_t + (ignore_et - ignore_st)
                begin_time = time.time()

                if reset or res < r_prev:
                    s_default = s.copy()
                    r_prev = res
                    reset = False
                    acc_s = acc.compute(s_default.flatten())
                    s = acc_s.reshape(s.shape)                   
                    admm_it = admm_it + 1
                else:
                    s = s_default.copy()
                    reset = True
                    reset_count = reset_count + 1
                    acc.replace(s_default.flatten())
                    continue

                # energys.append(e)
                run_time = time.time()
                total_t = total_t + run_time - begin_time
                times.append(total_t)
                # times.append(run_time - begin_time - ignore_t)
                residuals.append(np.sqrt(rho*r_com/size1))
                if r_com < 1e-10:
                    break
            print('DR reset number:%d, total time: %.6f'%(reset_count, total_t))
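        # note: Douglas-Rachford iterates on the single combined variable s = Z + U;
        # r_com re-applies the prox to the updated s so the reported residual stays
        # comparable with the AA-ADMM combined residual above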

        ## AA-DR DR envelope
        # if mid == 3:
        #     # acc parameters
        #     s = Z - U
        #     s_default = s.copy()
        #     acc = anderson.Anderson(s.flatten(), anderson_m)
        #     reset = True
        #     dre_prev = 1e10
        #     admm_it = 0
        #     ignore_t = 0
        #     reset_count = 0
        #     begin_time = time.time()
        #     while admm_it < num_admm_iterations:
        #         Z = prox_l1l2(Lambda, s, 1. / rho)
        #         C = cho_solve(solve_prefactored, c + rho * (2*Z - s).reshape(c.shape)).reshape(C.shape)
        #         s = s + C - Z
        #         res = np.square(la.norm(C - Z))

        #         ignore_st = time.time()
        #         Z_p = prox_l1l2(Lambda, s, 1. / rho)
        #         r_com = res + np.square(la.norm(Z_p - Z))
        #         ignore_et = time.time()
        #         ignore_t = ignore_t + (ignore_et - ignore_st)

        #         # f(x) + g(z)
        #         R = X - np.tensordot(W, C, (1, 0))  # residual
        #         sparsity = np.sum(Lambda * np.sqrt((C ** 2).sum(axis=2)))
        #         e = (R ** 2).sum() + sparsity

        #         #DRE
        #         dre = e + rho * np.sum(np.multiply(s-Z, C-Z)) + res * rho / 2

        #         if reset or dre < dre_prev:
        #             s_default = s.copy()
        #             dre_prev = dre
        #             reset = False
        #             acc_s = acc.compute(s_default.flatten())
        #             s = acc_s.reshape(s.shape)
        #             admm_it = admm_it + 1
        #         else:
        #             s = s_default.copy()
        #             reset = True
        #             reset_count = reset_count + 1
        #             acc.replace(s_default.flatten())
        #             continue

        #         energys.append(e)
        #         run_time = time.time()
        #         times.append(run_time - begin_time - ignore_t)
        #         residuals.append(np.sqrt(rho*r_com/size1))
        #         if r_com < 1e-10:
        #             break
        #     print('DR Envelope reset number:%d'%(reset_count))

        if it == outiter:
            fname = out_res + '.txt'
            print_res(fname, times, energys, residuals)

        # set updated components to dual Z, 
        # this was also suggested in [Boyd et al.] for optimization of sparsity-inducing norms
        C = Z
        # evaluate objective function
        R = X - np.tensordot(W, C, (1, 0)) # residual
        sparsity = np.sum(Lambda * np.sqrt((C**2).sum(axis=2)))
        e = (R**2).sum() + sparsity


        # TODO convergence check
        print("iteration %03d, E=%f" % (it, e))

        if it == outiter:
            break

    # undo scaling
    C /= pre_scale_factor
    total_endtime = time.time()
    run_total_time = total_endtime - total_begintime

    # # save components
    # with h5py.File(output_sploc_file, 'w') as f:
    #     f['default'] = Xmean
    #     f['tris'] = tris
    #     for i, c in enumerate(C):
    #         f['comp%03d' % i] = c + Xmean
    #
    # # save encoded animation including the weights
    # if output_animation_file:
    #     with h5py.File(output_animation_file, 'w') as f:
    #         f['verts'] = np.tensordot(W, C, (1, 0)) + Xmean[np.newaxis]
    #         f['tris'] = tris
    #         f['weights'] = W

    return run_total_time, e
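
# Hedged sketch of the anderson.Anderson helper assumed above; the interface
# (constructor taking the initial iterate and window size m, compute() mapping
# one fixed-point output to an accelerated iterate, replace() resetting the
# history after a rollback) is inferred from usage, and the real module may
# differ. This is a plain type-II Anderson acceleration.
class Anderson(object):
    def __init__(self, x0, m):
        self.m = m
        self.replace(x0)

    def replace(self, x):
        # restart the history from a single iterate (used after a rollback)
        self.x_prev = x.copy()
        self.G_prev = None
        self.dF = []  # differences of residuals f_k = G(x_k) - x_k
        self.dG = []  # differences of fixed-point outputs G(x_k)

    def compute(self, gx):
        # gx is G(x_prev), the output of one un-accelerated iteration
        f = gx - self.x_prev
        if self.G_prev is not None:
            self.dF.append(f - self.f_prev)
            self.dG.append(gx - self.G_prev)
            if len(self.dF) > self.m:  # keep a sliding window of m differences
                self.dF.pop(0)
                self.dG.pop(0)
        self.f_prev = f
        self.G_prev = gx.copy()
        if self.dF:
            dF = np.array(self.dF).T  # (n, k) matrix of residual differences
            gamma = np.linalg.lstsq(dF, f, rcond=None)[0]
            x_acc = gx - np.array(self.dG).T.dot(gamma)
        else:
            x_acc = gx  # no history yet: fall back to the plain iterate
        self.x_prev = x_acc.copy()
        return x_acc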