Example no. 1
def tensor_determinant(g):
    """

    :param g: tensor dim = 3*3*xg*yg*zg
    :return: determinant of the tensor dim = xg*yg*zg
    """
    import numpy as np

    import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils

    g = np.array(g)
    d = 0
    s = g.shape
    # Recursive cofactor expansion: the 3*3 matrix is expanded into 2*2 minors,
    # the determinant of each minor is computed recursively, and the overall
    # determinant is the signed sum of these blocks.
    if s[0] == 3:
        # if the tensor is 3*3

        for i in range(s[0]):

            if np.mod(i, 2) == 0:
                epsilon = 1
            else:
                epsilon = -1

            if i == 0:
                g1 = [[g[1][1], g[1][2]], [g[2][1], g[2][2]]]
            elif i == 1:
                g1 = [[g[0][1], g[0][2]], [g[2][1], g[2][2]]]
            else:
                g1 = [[g[0][1], g[0][2]], [g[1][1], g[1][2]]]
            # recursive call on the 2*2 minor
            prod = epsilon * g[i][0] * utils.tensor_determinant(g1)
            d = d + prod

    elif s[0] == 2:
        # if the tensor is 2*2
        for i in range(s[0]):
            if np.mod(i, 2) == 0:
                epsilon = 1
            else:
                epsilon = -1
            if i == 0:
                g1 = [g[1][1]]
            elif i == 1:
                g1 = [g[0][1]]
            prod = epsilon * g[i][0] * utils.tensor_determinant(g1)
            d = d + prod

    elif s[0] == 1:
        # if the tensor reduces to a single 1*1 block
        d = [g[0]]
    return d
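A minimal usage sketch (not part of the original example): assuming clinica is installed and the tensor field is stored as an array of shape (3, 3, xg, yg, zg), the recursive determinant can be checked against numpy.linalg.det. The leading singleton axis dropped below comes from the list wrapping in the 1*1 base case.

import numpy as np
import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils

rng = np.random.default_rng(0)
g = rng.standard_normal((3, 3, 4, 5, 6))              # one 3*3 matrix per voxel of a 4*5*6 grid

det = np.array(utils.tensor_determinant(g))           # recursive cofactor expansion, shape (1, 4, 5, 6)
det = det[0]                                          # drop the leading singleton axis

det_ref = np.linalg.det(np.moveaxis(g, (0, 1), (-2, -1)))  # numpy works on stacks of 3*3 matrices
print(np.allclose(det, det_ref))                      # expected: True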
Example no. 2
def tensor_commatrix(g):
    """

    :param g: tensor
    :return: comatrix (matrix of cofactors) of the tensor
    """
    import numpy as np

    import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils

    g = np.array(g)
    g_com = []

    for i in range(g.shape[0]):
        for j in range(g.shape[0]):
            if np.mod(i + j, 2) == 0:
                epsilon = 1
            else:
                epsilon = -1
            if i == 0:
                if j == 0:
                    n = [[g[1][1], g[1][2]], [g[2][1], g[2][2]]]
                    a0 = epsilon * utils.tensor_determinant(n)
                elif j == 1:
                    n = [[g[1][0], g[1][2]], [g[2][0], g[2][2]]]
                    a1 = epsilon * utils.tensor_determinant(n)
                else:
                    n = [[g[1][0], g[1][1]], [g[2][0], g[2][1]]]
                    a2 = epsilon * utils.tensor_determinant(n)
            elif i == 1:
                if j == 0:
                    n = [[g[0][1], g[0][2]], [g[2][1], g[2][2]]]
                    b0 = epsilon * utils.tensor_determinant(n)
                elif j == 1:
                    n = [[g[0][0], g[0][2]], [g[2][0], g[2][2]]]
                    b1 = epsilon * utils.tensor_determinant(n)
                else:
                    n = [[g[0][0], g[0][1]], [g[2][0], g[2][1]]]
                    b2 = epsilon * utils.tensor_determinant(n)
            else:
                if j == 0:
                    n = [[g[0][1], g[0][2]], [g[1][1], g[1][2]]]
                    c0 = epsilon * utils.tensor_determinant(n)
                elif j == 1:
                    n = [[g[0][0], g[0][2]], [g[1][0], g[1][2]]]
                    c1 = epsilon * utils.tensor_determinant(n)
                else:
                    n = [[g[0][0], g[0][1]], [g[1][0], g[1][1]]]
                    c2 = epsilon * utils.tensor_determinant(n)
    g_com = [[a0, a1, a2], [b0, b1, b2], [c0, c1, c2]]
    g_com = np.array(g_com)
    if len(g_com.shape) == 6:
        # drop the singleton axis introduced by tensor_determinant
        g_com = g_com[:, :, 0, :, :, :]

    return g_com
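A hedged sanity check (not from the original source): for an invertible matrix G the cofactor matrix equals det(G) * inv(G).T, so the voxel-wise output can be compared with numpy, assuming the tensor field is stored as shape (3, 3, xg, yg, zg).

import numpy as np
import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils

rng = np.random.default_rng(1)
g = rng.standard_normal((3, 3, 2, 2, 2))              # 3*3 tensor field on a 2*2*2 grid

c = np.array(utils.tensor_commatrix(g))               # shape (3, 3, 2, 2, 2) after the internal squeeze

g_mat = np.moveaxis(g, (0, 1), (-2, -1))              # stack of per-voxel 3*3 matrices
c_ref = np.linalg.det(g_mat)[..., None, None] * np.swapaxes(np.linalg.inv(g_mat), -1, -2)
c_ref = np.moveaxis(c_ref, (-2, -1), (0, 1))
print(np.allclose(c, c_ref))                          # expected: True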
Example no. 3
def heat_finite_elt_2D_tensor2(x0, t_final, t_step, h, g):
    """

    :param x0: vector x (at t = 0)
    :param t_final: final time
    :param t_step: time step (must satisfy the CFL condition max(lambda) < 2)
    :param h: space step
    :param g: metric tensor
    :return: vector x (at t = t_final)

    """
    import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils
    import numpy as np

    # parameters
    nb_step = int(np.ceil(t_final / t_step))  # number of time steps
    t_step = t_final / nb_step

    # tensors
    detg = utils.tensor_determinant(g)
    detg = np.sqrt(detg)
    ginv = utils.tensor_inverse(g)
    ginv = utils.tensor_scalar_product(detg, ginv)
    detg2 = detg[1:-1, 1:-1]

    # LOOP
    x = x0
    for i in range(nb_step):
        m = t_step / h / h
        x = x - utils.tensor_scalar_product(
            m,
            np.divide(
                utils.tensor_scalar_product(h, utils.operateur(x, ginv, detg)),
                detg2,
                dtype=object,
            ),
        )

    return x
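For context (illustration only, not the clinica implementation): the loop above is an explicit Euler step x <- x - t_step * L(x), where L is the metric-weighted Laplacian assembled by utils.operateur. A one-dimensional analogue with a plain finite-difference Laplacian shows the same structure and the same stability constraint on the time step.

import numpy as np

def heat_step_1d(x, dt, h):
    # forward Euler step for the 1-D heat equation with Dirichlet boundaries
    lap = (2 * x[1:-1] - x[:-2] - x[2:]) / (h * h)    # discrete (negative) Laplacian
    x = x.copy()
    x[1:-1] -= dt * lap
    return x

h, dt = 0.1, 0.004                                    # dt * lambda_max < 2, with lambda_max < 4 / h**2
x = np.zeros(11)
x[5] = 1.0                                            # initial heat spike
for _ in range(int(np.ceil(0.1 / dt))):
    x = heat_step_1d(x, dt, h)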
Example no. 4
def largest_eigenvalue_heat_3D_tensor2(g, h, epsilon):
    """

    :param g: metric tensor
    :param h: space step
    :param epsilon: stopping criterion
    :return: lambda, the largest eigenvalue

    """
    import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils

    import numpy as np
    import cmath

    # parameters
    if epsilon is None:
        epsilon = 1e-6
    erreur = 1 + epsilon

    # tensors

    detg = utils.tensor_determinant(g)
    detg = np.array(detg, dtype=np.complex128)  # complex tensor
    detg = np.sqrt(detg)
    detg = detg[0]
    ginv = utils.tensor_inverse(g)

    if len(ginv.shape) == 6:
        ginv = ginv[:, :, 0, :, :, :]

    ginv = utils.tensor_scalar_product(detg, ginv)
    detg2 = detg[1:-1, 1:-1, 1:-1]  # 141*121*141
    # x != x is only True for NaN entries: zero them out
    detg2[detg2 != detg2] = 0
    detg[detg != detg] = 0
    ginv[ginv != ginv] = 0

    # initialisation

    s = [g[0][0].shape[0] - 2, g[0][0].shape[1] - 2, g[0][0].shape[2] - 2]
    b1 = np.ones([s[0], s[1], s[2]])

    b1 = np.divide(b1, np.array(cmath.sqrt(np.dot(b1.flatten('F').transpose(), b1.flatten('F'))), dtype=np.complex128))

    print("Computation of the largest eigenvalue ...")
    while erreur > epsilon:
        b0 = b1
        b2 = np.array(np.divide(np.array(utils.operateur(b1, ginv, detg)) * h, detg2) / h / h / h, dtype=np.complex128)
        b1 = np.divide(b2, np.array(cmath.sqrt(np.dot(b2.flatten('F').transpose(), b2.flatten('F')))),
                       dtype=np.complex128)

        erreur = np.linalg.norm(b1.flatten('F') - b0.flatten('F'))

    print("done")

    lam = (cmath.sqrt(np.dot(b2.flatten('F').transpose(), b2.flatten('F'))))

    return lam
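The while-loop above is a power iteration on the discrete heat operator. A minimal sketch of the same pattern on an ordinary symmetric matrix (illustration only) makes the stopping rule and the returned quantity explicit.

import numpy as np

def largest_eigenvalue(a, epsilon=1e-6):
    b1 = np.ones(a.shape[0])
    b1 = b1 / np.linalg.norm(b1)
    erreur = 1 + epsilon
    while erreur > epsilon:
        b0 = b1
        b2 = a @ b1                          # apply the operator
        b1 = b2 / np.linalg.norm(b2)         # renormalise
        erreur = np.linalg.norm(b1 - b0)     # change between successive iterates
    return np.linalg.norm(b2)                # tends to the dominant eigenvalue's magnitude

print(largest_eigenvalue(np.diag([1.0, 2.0, 5.0])))  # expected: approximately 5.0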
Example no. 5
def heat_finite_elt_3D_tensor2(x0, t_final, t_step, h, g):
    """

    :param x0: vector x (at t = 0)
    :param t_final: final time
    :param t_step: time step (must satisfy the CFL condition max(lambda) < 2)
    :param h: space step
    :param g: metric tensor
    :return: vector x (at t = t_final)

    """
    import numpy as np

    import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils

    if len(x0.shape) == 4:
        x0 = x0[0, :, :, :]

    # parameters
    nb_step = np.ceil(t_final / t_step)  # number of time steps
    nb_step = nb_step.astype(int)
    t_step = t_final / nb_step

    # tensors
    detg = utils.tensor_determinant(g)
    detg = np.sqrt(detg)
    ginv = utils.tensor_inverse(g)
    ginv = utils.tensor_scalar_product(detg, ginv)
    detg2 = detg[:, 1:-1, 1:-1, 1:-1]
    if len(ginv.shape) == 6:
        ginv = ginv[:, :, 0, :, :, :]
    if len(detg.shape) == 4:
        detg = detg[0, :, :, :]
    ginv = np.array(ginv.real, dtype="float64")
    detg = np.array(detg.real, dtype="float64")
    detg2 = np.array(detg2.real, dtype="float64")

    # LOOP
    x = x0
    for i in range(nb_step):
        x = np.array(
            x
            - t_step
            * (np.divide(np.array(utils.operateur(x, ginv, detg)) * h, detg2))
            / h
            / h
            / h
        )

    return x
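A hedged sketch of how the two previous examples might be combined (an assumption, not taken from the pipeline): estimate the largest eigenvalue first, then choose a time step so that t_step * lambda stays below 2, which is one reading of the docstring's CFL remark. Both functions are assumed to live in spatial_svm_utils, as the imports above suggest; smooth_field and safety are hypothetical names.

import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils

def smooth_field(x0, g, h, t_final, safety=0.9):
    # hypothetical helper: pick a CFL-safe time step from the spectral estimate
    lam = utils.largest_eigenvalue_heat_3D_tensor2(g, h, 1e-6)
    t_step = safety * 2.0 / abs(lam)          # assumed reading of "max(lambda) < 2"
    return utils.heat_finite_elt_3D_tensor2(x0, t_final, t_step, h, g)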
Example no. 6
def tensor_inverse(g):
    """

    :param g: tensor
    :return: inverse of the tensor
    """
    import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils

    h = utils.tensor_transpose(utils.tensor_commatrix(g))
    detg = utils.tensor_determinant(g)

    h = h * (1 / detg)
    # h != h is only True for NaN entries (division by a zero determinant)
    mask = h != h
    h[mask] = 0
    return h
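A hedged check against numpy (assuming utils.tensor_transpose, which is not shown here, swaps the two matrix axes so that the adjugate formula inv(G) = adj(G) / det(G) is computed per voxel):

import numpy as np
import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils

rng = np.random.default_rng(2)
g = rng.standard_normal((3, 3, 2, 2, 2))

ginv = np.array(utils.tensor_inverse(g))

ginv_ref = np.moveaxis(np.linalg.inv(np.moveaxis(g, (0, 1), (-2, -1))), (-2, -1), (0, 1))
print(np.allclose(ginv, ginv_ref))            # expected: True under the assumption above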
Example no. 7
def tensor_eigenvalues(g):
    """

    :param g: tensor
    :return: eigenvalues of the tensor

    """
    import numpy as np

    import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils

    g = np.array(g)

    if g.shape[0] < 4:
        # only implemented for tensors of dimension at most 3*3

        C1 = np.ones(len(np.ravel(g[0][0])))
        buff = -utils.tensor_trace(g)
        C2 = buff.flatten("F")
        buff = utils.tensor_trace(utils.tensor_product(g, g))
        buff = -0.5 * (buff.flatten("F") - np.multiply(C2, C2))
        C3 = buff.flatten("F")
        buff = -utils.tensor_determinant(g)
        C4 = buff.flatten("F")

        C = np.array([C1, C2, C3, C4])
        rts = utils.roots_poly(C)

    else:
        # larger dimensions would leave rts undefined below
        raise NotImplementedError("Degree too big: not yet implemented")

    rts2 = rts.real.copy()
    rts2.sort()

    lamb = np.zeros(
        shape=(g.shape[0], g.shape[2], g.shape[3], g.shape[4]), dtype="complex128"
    )

    for i in range(g.shape[0]):
        lamb[i, :, :, :] = rts2[:, i].reshape(
            g.shape[2], g.shape[3], g.shape[4], order="F"
        )

    # lamb[0] is the smallest eigenvalue at each voxel, lamb[2] the largest
    return lamb
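A hedged sketch of a check against numpy, assuming the helpers used above (tensor_trace, tensor_product, roots_poly) behave as their names suggest: for a symmetric tensor field the sorted per-voxel eigenvalues should match numpy.linalg.eigvalsh, which also returns them in ascending order.

import numpy as np
import clinica.pipelines.machine_learning_spatial_svm.spatial_svm_utils as utils

rng = np.random.default_rng(3)
a = rng.standard_normal((3, 3, 2, 2, 2))
g = a + np.swapaxes(a, 0, 1)                  # symmetric 3*3 matrix at every voxel

lamb = utils.tensor_eigenvalues(g)            # shape (3, 2, 2, 2), ascending along axis 0

lamb_ref = np.linalg.eigvalsh(np.moveaxis(g, (0, 1), (-2, -1)))
lamb_ref = np.moveaxis(lamb_ref, -1, 0)
print(np.allclose(lamb.real, lamb_ref))       # expected: True if the helpers behave as assumed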