Example 1
    def get_graph_laplacian(self, dsk):
        """
        Get the Laplacian linear decomposition of formula 12, L_op(dsk)

        k:= num_of_blendshapes
        n:= num_of_features

        :param dsk: (k, n)
        :return: L (k, n)
        """
        # compute coefficients
        ckl = compute_corr_coef(dsk, dsk)

        # compute laplacians
        L = []
        for k in range(np.shape(dsk)[0]):
            sum_coeff = 0
            sum_norm = 0
            for l in range(np.shape(dsk)[0]):
                if l != k:
                    sum_coeff += ckl[k, l] * (dsk[l] - dsk[k])
                    sum_norm += np.abs(ckl[k, l])
            L.append(sum_coeff / sum_norm)

        return np.array(L)
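A minimal usage sketch of the loop version above; `compute_corr_coef` is not shown on this page, so it is stubbed here as a row-wise cosine similarity, which is an assumption rather than the repository's actual implementation.

import numpy as np

def compute_corr_coef(a, b):
    # hypothetical stand-in for the repository's helper:
    # cosine similarity between every row of a and every row of b
    a_n = a / np.linalg.norm(a, axis=1, keepdims=True)
    b_n = b / np.linalg.norm(b, axis=1, keepdims=True)
    return a_n @ b_n.T

def graph_laplacian_loop(dsk):
    # same computation as get_graph_laplacian above, without the class
    ckl = compute_corr_coef(dsk, dsk)
    L = []
    for k in range(dsk.shape[0]):
        sum_coeff, sum_norm = 0, 0
        for l in range(dsk.shape[0]):
            if l != k:
                sum_coeff += ckl[k, l] * (dsk[l] - dsk[k])
                sum_norm += np.abs(ckl[k, l])
        L.append(sum_coeff / sum_norm)
    return np.array(L)

dsk = np.random.rand(4, 6)  # k=4 blendshapes, n=2 markers * 3 coords
print(graph_laplacian_loop(dsk).shape)  # -> (4, 6)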
Example 2
    def get_graph_laplacian(self, dsk):
        """
        Get the Laplacian linear decomposition of formula 12, L_op(dsk), in the form L @ dsk

        L is computed by splitting the problem into a left (Sl -> a) and a right (Sk -> b) part such that L = a - b, with:
            - a: the Ckl matrix with its diagonal removed, each row divided by that row's sum of absolute values
            - b: a diagonal matrix of the form sum(ckl) / sum(|ckl|) over each row

        k:= num_of_blendshapes
        n:= num_of_features

        :param dsk: (k, n)
        :return: L (k, k), to be applied as L @ dsk
        """
        # get ckl and remove diagonal
        ckl = compute_corr_coef(dsk, dsk)
        np.fill_diagonal(ckl, 0)  # for l!=k

        # compute decomposition of signed graph
        # compute sum(|ckl|)
        norm_ckl = np.sum(np.abs(ckl), axis=1)
        # compute sL coeff (left side)
        a = ckl / norm_ckl[:, np.newaxis]  # broadcast the row-wise division
        # compute sk coeff (right side)
        sum_ckl = np.sum(ckl, axis=1)
        b = np.diag(sum_ckl / norm_ckl)
        # build L by subtracting b from a
        L = a - b

        return L
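Both examples implement the same operator, so a quick consistency check (a sketch, reusing the hypothetical `compute_corr_coef` stub and `graph_laplacian_loop` from the sketch above) is that the matrix form applied to dsk reproduces the loop result:

def graph_laplacian_matrix(dsk):
    # vectorized construction of L, mirroring get_graph_laplacian above
    ckl = compute_corr_coef(dsk, dsk)
    np.fill_diagonal(ckl, 0)
    norm_ckl = np.sum(np.abs(ckl), axis=1)
    a = ckl / norm_ckl[:, np.newaxis]
    b = np.diag(np.sum(ckl, axis=1) / norm_ckl)
    return a - b

dsk = np.random.rand(4, 6)
assert np.allclose(graph_laplacian_matrix(dsk) @ dsk, graph_laplacian_loop(dsk))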
Example 3
def compute_trust_values(dsk, do_plot=False):
    """
    Compute trust values following formula 6

    k:= number of blendshapes
    n:= num_features (num_markers*3)

    :param dsk: delta_sk vector (k, n)
    :param do_plot: decide if we want to plot the between-correlation matrix
    :return: trust values vector (k,)
    """

    if len(np.shape(dsk)) != 2:
        raise ValueError(
            "[COMPUTE TRUST VALUE] dsk has {} dimensions, expected 2"
            .format(len(np.shape(dsk))))

    # compute between-blendshape correlation
    ckl = compute_corr_coef(dsk, dsk)
    ckl = np.maximum(ckl, 0)  # keep only positive correlations
    if do_plot:
        plot_similarities(ckl,
                          "Between blendshapes correlation",
                          vmin=0,
                          vmax=1)

    # compute lower triangle
    num_k = np.shape(ckl)[0]
    low_trig = np.zeros(num_k)
    for k in range(num_k):
        val = 0
        for l in range(k):
            val += ckl[k, l]
        low_trig[k] = val
    max_low_trig = np.max(low_trig)

    # compute trust values  (formula 6)
    tk = np.zeros(num_k)
    for k in range(len(tk)):
        tk[k] = 1 - low_trig[k] / max_low_trig

    return tk
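As a sketch (again assuming the hypothetical `compute_corr_coef` stub above), the two loops collapse to a strict lower-triangle row sum:

def compute_trust_values_vectorized(dsk):
    # same result as compute_trust_values, with the k/l loops
    # replaced by a strict lower-triangle row sum
    ckl = np.maximum(compute_corr_coef(dsk, dsk), 0)
    low_trig = np.tril(ckl, k=-1).sum(axis=1)
    return 1 - low_trig / np.max(low_trig)

dsk = np.random.rand(4, 6)
print(compute_trust_values_vectorized(dsk))  # trust values in [0, 1], shape (4,)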
Example 4
    # declare variables
    n_k = 4  # num_blendshapes
    n_m = 2  # num markers
    n_n = n_m * 3  # num_features (num_markers * 3)
    dsk = np.random.rand(n_k, n_n)
    dp = np.random.rand(n_k, n_n)
    print("shape dp", np.shape(dp))
    print(dp)
    print()

    # declare ECEG
    e_CEG = ECEG(dsk)

    # compute Laplacian
    ckl = compute_corr_coef(dsk, dsk)
    L = []
    for k in range(np.shape(dsk)[0]):
        sum_coeff = 0
        sum_norm = 0
        for l in range(np.shape(dsk)[0]):
            if l != k:
                sum_coeff += ckl[k, l] * (dsk[l] - dsk[k])
                sum_norm += np.abs(ckl[k, l])
        L.append(sum_coeff / sum_norm)
    L = np.array(L)

    # compute eceg (the snippet is truncated here; the inner loop is
    # reconstructed following the same pattern as Example 6 below)
    e_ceg = 0
    for k in range(n_k):
        w = 0
        c_abs = 0
        for l in range(n_k):
            if l != k:
                ds = dsk[l] - dsk[k]
                w += ckl[k, l] * ds
                c_abs += np.abs(ckl[k, l])
        e_ceg += np.linalg.norm(w / c_abs) ** 2
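    # Sketch (not in the original snippet): L above already holds exactly these
    # per-blendshape rows, so the reconstructed loop reduces to one line.
    assert np.isclose(e_ceg, np.sum(np.linalg.norm(L, axis=1) ** 2))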
print("[data] num_blendshapes:", K)
print("[data] num_markers:", M)
print("[data] num_features (M*3):", M*n_dim)
print("[data] num_frames", F)
print()

# 1) Facial Motion Similarity
# reorder delta blendshapes
sorted_delta_sk, sorted_index = re_order_delta(delta_sk)
sorted_mesh_list = np.array(cleaned_mesh_list)[sorted_index]
print("[Pre-processing] shape sorted_delta_sk", np.shape(sorted_delta_sk))
print("[Pre-processing] len sorted_mesh_list", len(sorted_mesh_list))

if not load_pre_processed:
    # measure similarity between character blendshapes and actor's capture performance
    ckf = compute_corr_coef(np.reshape(delta_af, (np.shape(delta_af)[0], -1)),
                            np.reshape(sorted_delta_sk, (np.shape(sorted_delta_sk)[0], -1)))

    if do_plot:
        plot_similarities(ckf, "Fig. 7: Motion space similarity")

    # contrast enhancement
    tk = compute_trust_values(np.reshape(sorted_delta_sk, (np.shape(sorted_delta_sk)[0], -1)), do_plot=do_plot)
    tilda_ckf = compute_tilda_corr_coef(ckf, tk)
    print("[Pre-processing] shape ckf", np.shape(ckf))
    print("[Pre-processing] shape tk", np.shape(tk))
    print("[Pre-processing] shape tilda_ckf", np.shape(tilda_ckf))
    print()

    # 2) Key Expression Extraction
    key_expressions_idx = get_key_expressions(tilda_ckf, ksize=3, theta=2, do_plot=do_plot)
    F = len(key_expressions_idx)
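    # Hypothetical continuation (not shown in this snippet): the key expression
    # indices would presumably subsample the actor frames for the later steps.
    key_delta_af = delta_af[key_expressions_idx]
    print("[Pre-processing] shape key_delta_af", np.shape(key_delta_af))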
Example 6
    np.random.seed(1)
    np.set_printoptions(precision=4, linewidth=200, suppress=True)

    # declare variables
    n_k = 4  # num_blendshapes
    n_m = 2  # num markers
    n_n = n_m * 3  # num_features (num_markers * 3)
    dsk = np.random.rand(n_k, n_n)
    dp = np.random.rand(n_k, n_n)
    print("shape dp", np.shape(dp))
    print(dp)

    # compute displacement and similarity
    dis_dsk = dp - np.reshape(dsk, (n_k, n_n))
    ckl = compute_corr_coef(dis_dsk, dis_dsk)

    # compute graph Laplacian rows and accumulate the energy
    e_ceg = 0
    for k in range(n_k):
        w = 0
        c_abs = 0
        for l in range(n_k):
            if l != k:
                ds = dis_dsk[l] - dis_dsk[k]
                w += ckl[k, l] * ds

                c_abs += np.abs(ckl[k, l])
        L = w / c_abs

        # accumulate the squared norm of each Laplacian row into the energy
        e_ceg += np.linalg.norm(L) ** 2
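    # Sketch of a cross-check (assuming the compute_corr_coef stub above):
    # the accumulated energy equals the closed matrix form of Example 2,
    # e_ceg == sum_k ||(L_mat @ dis_dsk)_k||^2
    ckl0 = np.array(ckl)
    np.fill_diagonal(ckl0, 0)
    norm_ckl = np.sum(np.abs(ckl0), axis=1)
    L_mat = ckl0 / norm_ckl[:, np.newaxis] - np.diag(np.sum(ckl0, axis=1) / norm_ckl)
    assert np.isclose(e_ceg, np.sum(np.linalg.norm(L_mat @ dis_dsk, axis=1) ** 2))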