import numpy as np
import tensorly as tl
from tensorly.decomposition import parafac

import tool  # local helpers: SNR_sigma, scaling_plus_minus, col_permutation


# Three simulation variants follow; each (re)defines simulation1(para), so only the last
# definition survives if this file is imported as a single module.
def simulation1(para):
    X1, X2, X3 = np.identity(para['I1']), \
                 np.identity(para['I2']), \
                 np.identity(para['I3'])

    B1, B2, B3 = np.zeros((para['I1'], para['rank'])), \
                 np.zeros((para['I2'], para['rank'])), \
                 np.zeros((para['I3'], para['rank']))

    Gamma1, Gamma2, Gamma3 = np.random.randn(para['I1'], para['rank']), \
                             np.random.randn(para['I2'], para['rank']), \
                             np.random.randn(para['I3'], para['rank'])
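
    # Note: with X1, X2, X3 as identity matrices and B1, B2, B3 as zeros, the factors below
    # reduce to the random Gamma terms and the projection matrices are identities, so this
    # variant exercises the pipeline without any covariate signal.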

    # Combining the normalized CP components reproduces the same Y as the non-normalized factors
    A, B, C = X1.dot(B1) + Gamma1, X2.dot(B2) + Gamma2, X3.dot(B3) + Gamma3
    lambda1, lambda2, lambda3 = np.linalg.norm(A, axis=0), np.linalg.norm(B, axis=0), np.linalg.norm(C, axis=0)
    weights = lambda1 * lambda2 * lambda3
    factors = [A / lambda1, B / lambda2, C / lambda3]

    # Step 4: adjust signs
    weights, factors = tool.scaling_plus_minus(para, weights, factors)
    # Step 5: adjust column order: compute each column's position after sorting the weights in descending order
    weights, factors = tool.col_permutation(weights, factors)

    ori_Y = tl.cp_to_tensor((weights, factors))

    sigma = tool.SNR_sigma(ori_Y, para['SNR'])  # noise standard deviation for the target SNR
    error = np.random.normal(0, sigma, (para['I1'], para['I2'], para['I3']))  # noise at the specified SNR
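    # A minimal sketch of what tool.SNR_sigma is assumed to return (an assumption, not the
    # original implementation): with SNR defined as ||ori_Y||^2 / ||error||^2,
    #     sigma = tl.norm(ori_Y) / np.sqrt(para['SNR'] * ori_Y.size)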

    Y = ori_Y + error
    print('Mean of Y: {}'.format(np.around(Y.mean(), decimals=2)))
    SNR1 = np.around(tl.norm(ori_Y) ** 2 / tl.norm(Y - ori_Y) ** 2, decimals=2)
    SNR2 = np.around(tl.norm(Y - ori_Y) ** 2 / tl.norm(ori_Y) ** 2, decimals=2)
    print('Signal-to-noise ratio: {}'.format(SNR1))
    print('Noise-to-signal ratio: {}'.format(SNR2))

    """
    算法实现
    """
    # 部分参数
    modes_list = [mode for mode in range(tl.ndim(Y))]
    corvariate = [X1, X2, X3]

    # Initialization
    # First compute the projection (hat) matrices for the covariates
    project = [i.dot(np.linalg.inv(i.T.dot(i))).dot(i.T) for i in corvariate]
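    # Each entry of `project` is the orthogonal projector onto the column space of the
    # corresponding covariate matrix. If X^T X risks being ill-conditioned, an equivalent
    # alternative (an assumption, not used in the original) is:
    #     project = [x.dot(np.linalg.pinv(x)) for x in corvariate]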

    # -------option: include this step only when G(X) needs to be estimated---------
    # Use the projection matrices and the adjusted factor matrices to obtain a G(X)
    # that satisfies the identifiability condition
    GX = [project[i].dot(factors[i]) for i in range(len(factors))]
    # ------------------option: end---------------------

    # Then project the tensor Y along every mode
    project_Y = tl.tenalg.multi_mode_dot(Y, project)
    # Steps 1 & 2: compute G1, G2, G3; normalization is not needed here
    weights, Gs = parafac(project_Y, rank=para['rank'], normalize_factors=False, n_iter_max=1000)
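    # Step 3 below recovers each factor by least squares:
    #     hat_A = tildeY_(mode) Q^T (Q Q^T)^{-1},
    # where Q is the mode-wise unfolding of the CP tensor built from the other modes' G factors
    # (with the current mode's factor replaced by the identity).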
    # Step 3: recover A, B, C from G1, G2, G3
    hat_factor = []
    for mode in modes_list:
        tildeY = tl.tenalg.multi_mode_dot(Y, project, skip=mode)
        tempGs = Gs.copy()
        tempGs[mode] = np.eye(para['rank'])
        Q = tl.unfold(tl.cp_to_tensor((None, tempGs)), mode=mode)  # the Q matrix from the derivation
        factor = tl.unfold(tildeY, mode=mode).dot(Q.T).dot(np.linalg.inv(Q.dot(Q.T)))  # factor-matrix formula from the derivation
        hat_factor.append(factor)  # estimates of [A, B, C]
    # Step 4: normalize
    hat_weight = np.ones(para['rank'])
    for mode in modes_list:
        tempweight = np.linalg.norm(hat_factor[mode], axis=0)
        hat_weight = hat_weight * tempweight
        hat_factor[mode] = hat_factor[mode] / tempweight
    # Step 4': adjust signs
    hat_weight, hat_factor = tool.scaling_plus_minus(para, hat_weight, hat_factor)
    # Step 5: adjust column order
    hat_weight, hat_factor = tool.col_permutation(hat_weight, hat_factor)

    # -----Reconstruction accuracy: start---------
    # Estimate from the proposed (projected) method
    hat_Y = tl.cp_to_tensor((hat_weight, hat_factor))
    new_RE = tl.norm(hat_Y - ori_Y) / tl.norm(ori_Y)

    # Plain CP decomposition for comparison
    cp_weights, cp_factors = parafac(Y, rank=para['rank'], normalize_factors=True, n_iter_max=1000)

    cp_weights, cp_factors = tool.scaling_plus_minus(para, cp_weights, cp_factors)
    cp_weights, cp_factors = tool.col_permutation(cp_weights, cp_factors)
    newY = tl.cp_to_tensor((cp_weights, cp_factors))
    RE = tl.norm(newY - ori_Y) / tl.norm(ori_Y)

    # Round to 5 decimal places
    new_RE = np.around(new_RE, decimals=5)
    RE = np.around(RE, decimals=5)
    print('RE of projected CP = {}, RE of original CP = {}'.format(new_RE, RE))
    # -----Reconstruction accuracy: end---------

    # -----Factor-matrix accuracy: start---------
    # Estimate from the proposed (projected) method
    pro_fac_RE = [np.around(tl.norm(factors[i] - hat_factor[i]) / tl.norm(factors[i]), decimals=5) for i in
                  range(len(factors))]
    ori_fac_RE = [np.around(tl.norm(factors[i] - cp_factors[i]) / tl.norm(factors[i]), decimals=5) for i in
                  range(len(factors))]
    print('factor_RE of projected CP = {}'.format(pro_fac_RE))
    print('factor_RE of original CP = {}'.format(ori_fac_RE))
    # -----Factor-matrix accuracy: end---------

    return (new_RE, RE, pro_fac_RE, ori_fac_RE)
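
# A minimal usage sketch for the variant above (parameter values are assumptions, not from the original):
# para = {'I1': 30, 'I2': 30, 'I3': 30, 'rank': 3, 'SNR': 10}
# new_RE, RE, pro_fac_RE, ori_fac_RE = simulation1(para)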

# NOTE retained from an earlier draft (originally dangling, duplicated code after the
# return above). That draft differed from the function in two ways:
#   * Step 3 regularized the inverse as np.linalg.inv(Q.dot(Q.T) + 1e-10) to guard
#     against a nearly singular Q Q^T;
#   * the reconstruction check was taken against the noisy Y rather than ori_Y:
#         hat_Y = tl.cp_to_tensor((hat_weight, hat_factor))
#         hat_Y_RE = tl.norm(hat_Y - Y) / tl.norm(Y)
#         print('Error of reconstruction Y is: {}'.format(hat_Y_RE))
#
# TODO (translated from the draft):
# At the moment the projection approach hits a bug that makes the decomposition poor.
# Cause: an issue in the tensorly source; SVD initialization cannot be used, and the plain
#        CP decomposition occasionally runs into a singular matrix during inversion.
# Fix: do not modify the tensorly source; analyse our own algorithm only and use random
#      initialization.
# Issue 2: the reconstruction error of project_Y is consistently low, while the
#        reconstruction error of Y is sometimes high; after fixing the rank, run several
#        repetitions and present the ones with a good reconstruction of Y.


# Second variant: uniform covariates X, Gaussian coefficient matrices B, heavy-tailed
# 5 * t(3) noise, no Gamma term.
def simulation1(para):
    X1, X2, X3 = np.random.uniform(-1, 1, size=(para['I1'], para['q'])), \
                 np.random.uniform(-1, 1, size=(para['I2'], para['q'])), \
                 np.random.uniform(-1, 1, size=(para['I3'], para['q']))

    B1, B2, B3 = np.random.randn(para['q'], para['rank']), \
                 np.random.randn(para['q'], para['rank']), \
                 np.random.randn(para['q'], para['rank'])
    # """
    # 下面这个方法噪声比例会更高
    # """
    # B1, B2, B3 = np.random.uniform(-1, 1, size=(para['q'], para['rank'])), \
    #              np.random.uniform(-1, 1, size=(para['q'], para['rank'])), \
    #              np.random.uniform(-1, 1, size=(para['q'], para['rank']))

    # Combining the normalized CP components reproduces the same Y as the non-normalized factors
    A, B, C = X1.dot(B1), X2.dot(B2), X3.dot(B3)
    lambda1, lambda2, lambda3 = np.linalg.norm(A, axis=0), np.linalg.norm(
        B, axis=0), np.linalg.norm(C, axis=0)
    weights = lambda1 * lambda2 * lambda3
    factors = [A / lambda1, B / lambda2, C / lambda3]

    # Step 4: adjust signs
    weights, factors = tool.scaling_plus_minus(para, weights, factors)
    # Step 5: adjust column order: compute each column's position after sorting the weights in descending order
    weights, factors = tool.col_permutation(weights, factors)

    ori_Y = tl.cp_to_tensor((weights, factors))
    # ----------Noise term: t(3) or 5 * t(3)----------
    # error = np.random.standard_t(3, size=(para['I1'], para['I2'], para['I3']))
    error = np.random.standard_t(3,
                                 size=(para['I1'], para['I2'], para['I3'])) * 5
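    # Var(t_3) = 3, so the 5 * t(3) noise has variance 75, giving a much lower SNR than the
    # plain t(3) alternative commented out above.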

    Y = ori_Y + error
    print('Mean of Y: {}'.format(np.around(Y.mean(), decimals=2)))
    SNR1 = np.around(tl.norm(ori_Y)**2 / tl.norm(Y - ori_Y)**2, decimals=2)
    SNR2 = np.around(tl.norm(Y - ori_Y)**2 / tl.norm(ori_Y)**2, decimals=2)
    print('Signal-to-noise ratio: {}'.format(SNR1))
    print('Noise-to-signal ratio: {}'.format(SNR2))
    """
    算法实现
    """
    # 部分参数
    modes_list = [mode for mode in range(tl.ndim(Y))]
    corvariate = [X1, X2, X3]

    # Initialization
    # First compute the projection (hat) matrices for the covariates
    project = [i.dot(np.linalg.inv(i.T.dot(i))).dot(i.T) for i in corvariate]

    # -------option: include this step only when G(X) needs to be estimated---------
    # Use the projection matrices and the adjusted factor matrices to obtain a G(X)
    # that satisfies the identifiability condition
    GX = [project[i].dot(factors[i]) for i in range(len(factors))]
    # ------------------option: end---------------------

    # Then project the tensor Y along every mode
    project_Y = tl.tenalg.multi_mode_dot(Y, project)
    # Steps 1 & 2: compute G1, G2, G3; normalization is not needed here
    weights, Gs = parafac(project_Y,
                          rank=para['rank'],
                          normalize_factors=False,
                          n_iter_max=1000)
    # Step 3: recover A, B, C from G1, G2, G3
    hat_factor = []
    for mode in modes_list:
        tildeY = tl.tenalg.multi_mode_dot(Y, project, skip=mode)
        tempGs = Gs.copy()
        tempGs[mode] = np.eye(para['rank'])
        Q = tl.unfold(tl.cp_to_tensor((None, tempGs)), mode=mode)  # the Q matrix from the derivation
        factor = tl.unfold(tildeY, mode=mode).dot(Q.T).dot(
            np.linalg.inv(Q.dot(Q.T)))  # factor-matrix formula from the derivation
        hat_factor.append(factor)  # estimates of [A, B, C]
    # Step 4: normalize
    hat_weight = np.ones(para['rank'])
    for mode in modes_list:
        tempweight = np.linalg.norm(hat_factor[mode], axis=0)
        hat_weight = hat_weight * tempweight
        hat_factor[mode] = hat_factor[mode] / tempweight
    # Step 4': adjust signs
    hat_weight, hat_factor = tool.scaling_plus_minus(para, hat_weight,
                                                     hat_factor)
    # Step 5: adjust column order
    hat_weight, hat_factor = tool.col_permutation(hat_weight, hat_factor)

    # -----Reconstruction accuracy: start---------
    # Estimate from the proposed (projected) method
    hat_Y = tl.cp_to_tensor((hat_weight, hat_factor))
    new_RE = tl.norm(hat_Y - ori_Y) / tl.norm(ori_Y)

    # Plain CP decomposition for comparison
    cp_weights, cp_factors = parafac(Y,
                                     rank=para['rank'],
                                     normalize_factors=True,
                                     n_iter_max=1000)

    cp_weights, cp_factors = tool.scaling_plus_minus(para, cp_weights,
                                                     cp_factors)
    cp_weights, cp_factors = tool.col_permutation(cp_weights, cp_factors)
    newY = tl.cp_to_tensor((cp_weights, cp_factors))
    RE = tl.norm(newY - ori_Y) / tl.norm(ori_Y)

    # Round to 5 decimal places
    new_RE = np.around(new_RE, decimals=5)
    RE = np.around(RE, decimals=5)
    print('RE of projected CP = {}, RE of original CP = {}'.format(
        new_RE, RE))
    # -----Reconstruction accuracy: end---------

    # -----Factor-matrix accuracy: start---------
    # Estimate from the proposed (projected) method
    pro_fac_RE = [
        np.around(tl.norm(factors[i] - hat_factor[i]) / tl.norm(factors[i]),
                  decimals=5) for i in range(len(factors))
    ]
    ori_fac_RE = [
        np.around(tl.norm(factors[i] - cp_factors[i]) / tl.norm(factors[i]),
                  decimals=5) for i in range(len(factors))
    ]
    print('factor_RE of projected CP = {}'.format(pro_fac_RE))
    print('factor_RE of original CP = {}'.format(ori_fac_RE))
    # -----Factor-matrix accuracy: end---------

    # -----G(X) accuracy start---------
    """
    TODO: write a separate experiment for this estimation accuracy, since this experiment
          does not include Gamma; see how to add Gamma. That experiment has the advantage
          of not needing a comparison with CP, because CP has no G(X).
          When generating the data, compute the initial G(X) with the projection matrices.
          Implemented in simulation_v3.

    hatGX = [project[i].dot(cp_factors[i]) for i in range(len(factors))]
    GX_RE = [tl.norm(hatGX[i] - GX[i]) / tl.norm(GX[i]) for i in range(len(factors))]
    print('GX_RE of projected CP = {}'.format(GX_RE))
    """
    # -----G(X) accuracy end---------
    return (new_RE, RE, pro_fac_RE, ori_fac_RE)
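
# A minimal usage sketch for the variant above (parameter values are assumptions, not from the original):
# para = {'I1': 30, 'I2': 30, 'I3': 30, 'q': 5, 'rank': 3}
# new_RE, RE, pro_fac_RE, ori_fac_RE = simulation1(para)


# Third variant: covariates plus Gamma terms made orthogonal to the covariates;
# only the G(X) accuracy is reported.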
def simulation1(para):
    X1, X2, X3 = np.random.uniform(-1, 1, size=(para['I1'], para['q'])), \
                 np.random.uniform(-1, 1, size=(para['I2'], para['q'])), \
                 np.random.uniform(-1, 1, size=(para['I3'], para['q']))

    B1, B2, B3 = np.random.randn(para['q'], para['rank']), \
                 np.random.randn(para['q'], para['rank']), \
                 np.random.randn(para['q'], para['rank'])
    Gamma1, Gamma2, Gamma3 = np.random.randn(para['I1'], para['rank']), \
                             np.random.randn(para['I2'], para['rank']), \
                             np.random.randn(para['I3'], para['rank'])
    corvariate = [X1, X2, X3]
    project = [i.dot(np.linalg.inv(i.T.dot(i))).dot(i.T)
               for i in corvariate]  # Gamma must be projected, so compute the projection matrices first

    # ------Remove from each Gamma the component that lies in the column space of its X------
    Gamma1 = Gamma1 - project[0].dot(Gamma1)
    Gamma2 = Gamma2 - project[1].dot(Gamma2)
    Gamma3 = Gamma3 - project[2].dot(Gamma3)
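    # After subtracting the projection, each Gamma is orthogonal to the column space of the
    # corresponding X (X^T Gamma = 0), so the covariate part X.dot(B) and the Gamma part of
    # each factor do not overlap.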

    # Combining the normalized CP components reproduces the same Y as the non-normalized factors
    A, B, C = X1.dot(B1) + Gamma1, X2.dot(B2) + Gamma2, X3.dot(B3) + Gamma3

    lambda1, lambda2, lambda3 = np.linalg.norm(A, axis=0), np.linalg.norm(
        B, axis=0), np.linalg.norm(C, axis=0)

    weights = lambda1 * lambda2 * lambda3
    factors = [A / lambda1, B / lambda2, C / lambda3]

    # Step 4: adjust signs
    weights, factors = tool.scaling_plus_minus(para, weights, factors)
    # Step 5: adjust column order: compute each column's position after sorting the weights in descending order
    weights, factors = tool.col_permutation(weights, factors)
    # -------option: include this step only when G(X) needs to be estimated---------
    # Use the projection matrices and the adjusted factor matrices to obtain a G(X) that
    # satisfies the identifiability condition; it serves as the ground truth for GX_RE below
    GX = [project[i].dot(factors[i]) for i in range(len(factors))]
    # ------------------option: end---------------------

    ori_Y = tl.cp_to_tensor((weights, factors))

    sigma = tool.SNR_sigma(ori_Y, para['SNR'])  # noise standard deviation for the target SNR
    error = np.random.normal(
        0, sigma, (para['I1'], para['I2'], para['I3']))  # noise at the specified SNR

    Y = ori_Y + error
    print('Mean of Y: {}'.format(np.around(Y.mean(), decimals=2)))
    SNR1 = np.around(tl.norm(ori_Y)**2 / tl.norm(Y - ori_Y)**2, decimals=2)
    SNR2 = np.around(tl.norm(Y - ori_Y)**2 / tl.norm(ori_Y)**2, decimals=2)
    print('Signal-to-noise ratio: {}'.format(SNR1))
    print('Noise-to-signal ratio: {}'.format(SNR2))
    """
    算法实现
    """
    # 部分参数
    modes_list = [mode for mode in range(tl.ndim(Y))]

    # Initialization
    # Projection matrices (identical to the ones computed above for Gamma)
    project = [i.dot(np.linalg.inv(i.T.dot(i))).dot(i.T) for i in corvariate]

    # Then project the tensor Y along every mode
    project_Y = tl.tenalg.multi_mode_dot(Y, project)
    # Steps 1 & 2: compute G1, G2, G3; normalization is not needed here
    weights, Gs = parafac(project_Y,
                          rank=para['rank'],
                          normalize_factors=False,
                          n_iter_max=1000)
    # Step 3: recover A, B, C from G1, G2, G3
    hat_factor = []
    for mode in modes_list:
        tildeY = tl.tenalg.multi_mode_dot(Y, project, skip=mode)
        tempGs = Gs.copy()
        tempGs[mode] = np.eye(para['rank'])
        Q = tl.unfold(tl.cp_to_tensor((None, tempGs)), mode=mode)  # the Q matrix from the derivation
        factor = tl.unfold(tildeY, mode=mode).dot(Q.T).dot(
            np.linalg.inv(Q.dot(Q.T)))  # factor-matrix formula from the derivation
        hat_factor.append(factor)  # estimates of [A, B, C]
    # Step 4: normalize
    hat_weight = np.ones(para['rank'])
    for mode in modes_list:
        tempweight = np.linalg.norm(hat_factor[mode], axis=0)
        hat_weight = hat_weight * tempweight
        hat_factor[mode] = hat_factor[mode] / tempweight
    # Step 4': adjust signs
    hat_weight, hat_factor = tool.scaling_plus_minus(para, hat_weight,
                                                     hat_factor)
    # Step 5: adjust column order
    hat_weight, hat_factor = tool.col_permutation(hat_weight, hat_factor)
    hat_GX = [project[i].dot(hat_factor[i]) for i in range(len(factors))]

    # -------G(X) accuracy start---------
    GX_RE = [
        np.around(tl.norm(hat_GX[i] - GX[i]) / tl.norm(GX[i]), decimals=5)
        for i in range(len(factors))
    ]
    print('RE of GX = {}'.format(GX_RE))
    # -----G(X) accuracy end---------

    return GX_RE
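
# A minimal usage sketch for the variant above (parameter values are assumptions, not from the original):
# para = {'I1': 30, 'I2': 30, 'I3': 30, 'q': 5, 'rank': 3, 'SNR': 10}
# GX_RE = simulation1(para)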