Example #1
def clusterCore(channelData1, covMatrixList1, channelData2, centroids,
                centroidUList, type):
    newChannelData1 = []
    newChannelData2 = []
    newDimension = np.shape(centroidUList[0])[1]
    p = np.shape(channelData1)[0]

    if type == "C":
        # Stack the channel covariance matrices into one matrix
        allCovMatrix1 = tools.matrixListToMatrix(covMatrixList1)

        # Determine which cluster each sample belongs to
        clusterAssment = kmeans.getClusterAssment(allCovMatrix1, centroids)

        # Transform each sample into the domain of its cluster centroid
        for i in range(p):
            newChannelData1.append(
                np.dot(channelData1[i],
                       centroidUList[int(clusterAssment[i, 0].real)]))
            newChannelData2.append(
                np.dot(channelData2[i],
                       centroidUList[int(clusterAssment[i, 0].real)]))

    if type == "U":
        informations, SigmaList, UList = tools.getInformations(covMatrixList1)
        allU = tools.matrixListToMatrix_U(UList)
        weights = tools.matrixListToMatrix_U(SigmaList)

        # 确定每个数据分别属于哪个簇
        clusterAssment = kmeans.getClusterAssment_U(allU, weights, centroids,
                                                    newDimension)

        # 变换域
        for i in range(p):
            newChannelData1.append(
                np.dot(channelData1[i],
                       centroidUList[(int)(clusterAssment[i, 0].real)]))
            newChannelData2.append(
                np.dot(channelData2[i],
                       centroidUList[(int)(clusterAssment[i, 0].real)]))

    if type == "general":
        newChannelData1 = pca.pca_general(channelData1, newDimension)
        newChannelData2 = pca.pca_general(channelData2, newDimension)

    if type == "none":
        newChannelData1 = channelData1
        newChannelData2 = channelData2

    # Average the per-column correlation coefficient between the two
    # transformed channel copies over all pages
    allNewCorr = []
    for i in range(p):
        for j in range(newDimension):
            cowCor = np.corrcoef(newChannelData1[i][:, j],
                                 newChannelData2[i][:, j])
            if i == 0:
                allNewCorr.append(cowCor[0, 1])
            else:
                allNewCorr[j] += cowCor[0, 1]

    for i in range(newDimension):
        allNewCorr[i] = abs(allNewCorr[i] / p)

    path = u'/Users/jinruimeng/Downloads/keyan/'
    nowTime = time.strftime("%Y-%m-%d.%H.%M.%S", time.localtime(time.time()))
    pathSuffix = type + u'_' + nowTime

    newChannelData1Path = path + "clusterAddNoise_newChannelData1_" + pathSuffix
    newChannelData2Path = path + "clusterAddNoise_newChannelData2_" + pathSuffix
    readAndWriteDataSet.write(newChannelData1, newChannelData1Path, ".xlsx")
    readAndWriteDataSet.write(newChannelData2, newChannelData2Path, ".xlsx")

    return allNewCorr
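
The tail of clusterCore averages the per-column correlation coefficient between the two transformed channel copies over all pages and reports the magnitude. Below is a minimal NumPy-only sketch of that averaging step with synthetic arrays standing in for the transformed channel data; the names data1 and data2 are illustrative and not part of the repository.

import numpy as np

# Synthetic stand-ins for newChannelData1 / newChannelData2 (assumption, illustration only)
rng = np.random.default_rng(0)
p, rows, newDimension = 3, 64, 4
data1 = [rng.normal(size=(rows, newDimension)) for _ in range(p)]
data2 = [d + 0.1 * rng.normal(size=(rows, newDimension)) for d in data1]

allNewCorr = np.zeros(newDimension)
for i in range(p):
    for j in range(newDimension):
        # correlation between matching columns of the two channel copies
        allNewCorr[j] += np.corrcoef(data1[i][:, j], data2[i][:, j])[0, 1]
allNewCorr = np.abs(allNewCorr / p)  # average over pages, keep the magnitude
print(allNewCorr)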
Example #2
    # Path configuration
    path = u'/Users/jinruimeng/Downloads/keyan/'
    # path = u'E:\\workspace\\keyan\\'

    # Lower/upper bounds and step of the SNR sweep (dB)
    low = -10
    high = 5
    step = 5
    suffix = str(low) + u'to' + str(high) + u'dB'
    time1 = int((high - low) / step + 1)  # number of SNR points

    # Read in the channel data
    channelDataPath = path + u'channelDataP.xlsx'
    channelDataAll = readAndWriteDataSet.excelToMatrixList(channelDataPath)
    p = np.shape(channelDataAll)[0]  # number of pages

    # Add noise to the channel data at each SNR
    channelDataAll1 = []
    channelDataAll2 = []
    for h in range(time1):
        SNR = low + h * step
        for i in range(p):
            noise1, noise2, npower = wgn_abs(channelDataAll[i], SNR)
            channelDataAll1.append(channelDataAll[i] + noise1)
            channelDataAll2.append(channelDataAll[i] + noise2)

    outChannelAll1ListPath = path + "outChannelAll1List_" + suffix
    outChannelAll2ListPath = path + "outChannelAll2List_" + suffix
    readAndWriteDataSet.write(channelDataAll1, outChannelAll1ListPath, ".xlsx")
    readAndWriteDataSet.write(channelDataAll2, outChannelAll2ListPath, ".xlsx")
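
wgn_abs is a project-local helper whose body is not shown in these examples; from its use above it returns two independent noise realizations plus the noise power for a target SNR. The following is a hedged sketch of what such a helper could look like; wgn_abs_sketch is an illustrative name and an assumption, not the repository's implementation.

import numpy as np

def wgn_abs_sketch(x, snr_db):
    # Assumption: the noise power is chosen so that mean signal power divided by
    # noise power equals the requested SNR (in dB); two independent realizations
    # of white Gaussian noise with that power are returned.
    x = np.asarray(x)
    signal_power = np.mean(np.abs(x) ** 2)
    npower = signal_power / (10 ** (snr_db / 10.0))
    sigma = np.sqrt(npower)
    noise1 = sigma * np.random.standard_normal(x.shape)
    noise2 = sigma * np.random.standard_normal(x.shape)
    return noise1, noise2, npower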
Example #3
    totalOldCorr, totalPcaCorr, totalNewCCorr, totalNewUCorr = cluster(
        a, schedule, channelDataAll1, channelDataAll2, allCentroidsC,
        allCentroidUList, allCentroidsU, allCentroidUList2)
    ps.close()
    ps.join()

    # Output the correlation coefficients
    path = u'/Users/jinruimeng/Downloads/keyan/'
    nowTime = time.strftime("%Y-%m-%d.%H.%M.%S", time.localtime(time.time()))
    pathSuffix = str(SNR) + u'_' + nowTime

    outOldCorrPath = path + "clusterAddNoise_outOldCorr_" + pathSuffix
    outPcaCorrPath = path + "clusterAddNoise_outPcaCorr_" + pathSuffix
    outNewCCorrPath = path + "clusterAddNoise_outNewCCorr_" + pathSuffix
    outNewUCorrPath = path + "clusterAddNoise_outNewUCorr_" + pathSuffix
    readAndWriteDataSet.write(totalOldCorr, outOldCorrPath, ".xlsx")
    readAndWriteDataSet.write(totalPcaCorr, outPcaCorrPath, ".xlsx")
    readAndWriteDataSet.write(totalNewCCorr, outNewCCorrPath, ".xlsx")
    readAndWriteDataSet.write(totalNewUCorr, outNewUCorrPath, ".xlsx")

    for i in range(np.shape(totalPcaCorr)[0]):
        plt.xlabel(u'分量序号')
        X = range(low, high + 1, step)
        plt.ylabel(u'相关系数')
        plt.plot(X, totalOldCorr[i], 'k-s', label=u'预处理前')
        plt.plot(X, totalPcaCorr[i], 'g-^', label=u'无交互PCA')
        plt.plot(X, totalNewCCorr[i], 'r-v', label=u'聚类协方差矩阵')
        plt.plot(X, totalNewUCorr[i], 'b-o', label=u'聚类变换矩阵')
        plt.legend(bbox_to_anchor=(0., 1.02, 1., .102),
                   loc=0,
                   ncol=4)

Example #4
    ps.close()
    ps.join()

    # Output the key inconsistency rates
    total_inconsistency_rate_old_array = readAndWriteDataSet.listToArray(total_inconsistency_rate_old)
    total_inconsistency_rate_new_noCom_array = readAndWriteDataSet.listToArray(total_inconsistency_rate_new_noCom)
    total_inconsistency_rate_new_array = readAndWriteDataSet.listToArray(total_inconsistency_rate_new)

    path = u'/Users/jinruimeng/Downloads/keyan/'
    nowTime = time.strftime("%Y-%m-%d.%H.%M.%S", time.localtime(time.time()))
    pathSuffix = u'SNR:' + str(low) + u'to' + str(high) + u'_' + u'bit:' + str(low2) + u'to' + str(high2) + u'_' + nowTime
    outOldInconsistencyPath = path + "clusterAddNoise_outOldInconsistency_" + pathSuffix
    outNewInconsistencyNoComPath = path + "clusterAddNoise_outNewInconsistencyNoCom_" + pathSuffix
    outNewInconsistencyPath = path + "clusterAddNoise_outNewInconsistency_" + pathSuffix
    readAndWriteDataSet.write(total_inconsistency_rate_old_array, outOldInconsistencyPath, ".xlsx")
    readAndWriteDataSet.write(total_inconsistency_rate_new_noCom_array, outNewInconsistencyNoComPath, ".xlsx")
    readAndWriteDataSet.write(total_inconsistency_rate_new_array, outNewInconsistencyPath, ".xlsx")

    # fig = plt.figure()
    # ax = fig.gca(projection='3d')
    # ax.set_xlabel(u'信噪比(dB)')
    # ax.set_ylabel(u'量化比特数')
    # ax.set_zlabel(u'密钥不一致率')
    #
    # x = []
    # y = []
    # z1 = []
    # z2 = []
    # for i in range(low, high + 1, step):
    #     for j in range(low2, high2 + 1, step2):
Example #5
                newSNRsArray[i, j] = newSNRsArray[i, j] / ((1 << a) * p)
                newSNRsArray[i, j] = 10 * np.log10(newSNRsArray[i, j])
        newSNRsList.append(newSNRsArray)

        readAndWriteDataSet.writeKey(path, key_newPca1, key_newPca2, SNR, u'pca')
        readAndWriteDataSet.writeKey(path, key_newC1, key_newC2, SNR, u'C')
        readAndWriteDataSet.writeKey(path, key_newU1, key_newU2, SNR, u'U')
        readAndWriteDataSet.writeKey(path, key_newWt1, key_newWt2, SNR, u'wt')

        # Compute the Shannon entropy of the generated keys
        total_key_pca = u''
        total_key_C = u''
        total_key_U = u''
        total_key_wt = u''
        for i in range(np.shape(key_newPca1)[0]):
            total_key_pca = total_key_pca + key_newPca1[i] + key_newPca2[i]
            total_key_C = total_key_C + key_newC1[i] + key_newC2[i]
            total_key_U = total_key_U + key_newU1[i] + key_newU2[i]
            total_key_wt = total_key_wt + key_newWt1[i] + key_newWt2[i]

        ents[0, h] = quantification.calc_shannon_ent(total_key_pca)
        ents[1, h] = quantification.calc_shannon_ent(total_key_C)
        ents[2, h] = quantification.calc_shannon_ent(total_key_U)
        ents[3, h] = quantification.calc_shannon_ent(total_key_wt)

    readAndWriteDataSet.write(newSNRsList, path + u'newSNR_' + suffix)
    readAndWriteDataSet.write(lengths, path + u'length_' + suffix)
    readAndWriteDataSet.write(errorNums, path + u'errorNum_' + suffix)
    readAndWriteDataSet.write(ents, path + u'ent_' + suffix)
    print("主进程结束!")
Example #6
def getCentroidsCore(path,
                     suffix,
                     channelData,
                     covMatrixList,
                     informations,
                     SigmaList,
                     UList,
                     g,
                     k,
                     iRate,
                     type="C"):
    nowTime = time.strftime("%Y-%m-%d.%H.%M.%S", time.localtime(time.time()))
    pathSuffix = type + "_" + str(g) + "_" + nowTime

    outOldCovMatrixListPath = path + "getCentroids_outOldCovMatrixList_" + pathSuffix
    outCentroidListPath = path + "getCentroids_outCentroidList_" + pathSuffix
    outClusterAssmentPath = path + "getCentroids_outClusterAssment_" + pathSuffix
    outNewChannelDataPath = path + "getCentroids_outNewChannelData_" + pathSuffix
    outNewCovMatrixListPath = path + "getCentroids_outNewCovMatrixList_" + pathSuffix
    ratesPath = path + "getCentroids_rates_" + pathSuffix
    UTsPath = path + "getCentroids_UTs_" + pathSuffix

    clusterAssmentList = []
    newChannelData = []
    newCovMatrixList = []
    UTs = []
    rates = []

    centroidList = []

    if type == u'C':
        allCovMatrix = tools.matrixListToMatrix(covMatrixList)

        # Cluster the covariance matrices
        centroids, clusterAssment = kmeans.KMeansOushi(allCovMatrix, k)
        clusterAssmentList.append(clusterAssment)
        centroidList = tools.matrixToMatrixList(centroids)

        # Analyze the PCA performance
        # newChannelData, newCovMatrixList, UTs, rates = pca.pca(channelData, informations, centroidList, clusterAssment, iRate)

    if type == u'U':
        allU = tools.matrixListToMatrix_U(UList)
        weights = tools.matrixListToMatrix_U(SigmaList)

        # Cluster the transform matrices (U), weighted by the singular values
        centroids, clusterAssment = kmeans.KMeansOushi_U(
            allU, k, weights, iRate)
        clusterAssmentList.append(clusterAssment)
        centroidList = tools.matrixToMatrixList_U(centroids)

        # Analyze the PCA performance
        # newChannelData, newCovMatrixList, UTs, rates = pca.pca_U(channelData, informations, centroidList, clusterAssment, iRate)

    # Output the results
    # Output the clustering result
    # readAndWriteDataSet.write(clusterAssmentList, outClusterAssmentPath, suffix)
    # The covariance matrices are too large; skip writing them for now
    # readAndWriteDataSet.write(covMatrixList, outOldCovMatrixListPath, suffix)
    # The cluster centroids are too large; skip writing them for now
    readAndWriteDataSet.write(centroidList, outCentroidListPath, suffix)
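
tools.matrixListToMatrix, used throughout these examples, stacks a list of equally sized matrices into a single matrix whose rows feed the K-means routines; its exact layout is not shown here. A hedged guess at such a flattening, with each matrix becoming one row (matrix_list_to_rows is an illustrative name):

import numpy as np

def matrix_list_to_rows(matrix_list):
    # Assumption: each matrix is flattened into one row vector of the output
    return np.vstack([np.asarray(m).reshape(1, -1) for m in matrix_list])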
Example #7
def cluster(schedule, path, suffix, channelData, g, iRate):
    if iRate > np.shape(channelData)[1]:
        print(u'降维后维度不能大于样本原有的维度!')
        return
    if iRate <= 0:
        print(u'降维后维度不能小于1!')
        return

    schedule[1] += 1
    tmpSchedule = schedule[1]
    print(u'共' + str(schedule[0]) + u'部分,' + u'第' + str(tmpSchedule) +
          u'部分开始!')

    pathSuffix = "C" + "_" + str(g) + "_"
    centroidListPath = path + "getCentroids_outCentroidList_" + pathSuffix

    nowTime = time.strftime("%Y-%m-%d.%H.%M.%S", time.localtime(time.time()))
    pathSuffix = pathSuffix + str(nowTime)

    outOldCovMatrixListPath = path + "cluster_outOldCovMatrixList_" + pathSuffix
    outClusterAssmentPath = path + "cluster_outClusterAssment_" + pathSuffix
    outNewChannelDataPath = path + "cluster_outNewChannelData_" + pathSuffix
    outNewCovMatrixsPath = path + "cluster_outNewCovMatrixList_" + pathSuffix
    ratesPath = path + "cluster_rates_" + pathSuffix
    UTsPath = path + "cluster_UTs_" + pathSuffix

    # Read in the cluster-centroid information
    # Merge data from multiple files
    centroidList = []
    for root, dirs, files in os.walk(path, topdown=True):
        for file in files:
            file = os.path.join(root, file)
            if centroidListPath in file:
                centroidListTmp = readAndWriteDataSet.excelToMatrixList(file)
                for centroid in centroidListTmp:
                    centroidList.append(centroid)
        break
    centroids = tools.matrixListToMatrix(centroidList)

    # Compute the channel covariance matrices and stack them into one matrix
    covMatrixList = tools.getCovMatrixList(channelData)
    allCovMatrix = tools.matrixListToMatrix(covMatrixList)

    # Determine which cluster each sample belongs to
    clusterAssment = kmeans.getClusterAssment(allCovMatrix, centroids)
    clusterAssmentList = []
    clusterAssmentList.append(clusterAssment)

    # Analyze the PCA performance
    newChannelData, newCovMatrixList, UTs, rates = pca.pca(
        channelData, covMatrixList, centroidList, clusterAssment, iRate)

    # Output the results
    # Output the clustering result
    readAndWriteDataSet.write(clusterAssmentList, outClusterAssmentPath,
                              suffix)
    # The covariance matrices are too large; skip writing them for now
    # readAndWriteDataSet.write(covMatrixList, outOldCovMatrixListPath, suffix)
    # Output the PCA results
    readAndWriteDataSet.write(newChannelData, outNewChannelDataPath, suffix)
    readAndWriteDataSet.write(newCovMatrixList, outNewCovMatrixsPath, suffix)
    readAndWriteDataSet.write(UTs, UTsPath, suffix)
    readAndWriteDataSet.write(rates, ratesPath, suffix)

    # Show progress
    print(u'共' + str(schedule[0]) + u'部分,' + u'第' + str(tmpSchedule) +
          u'部分完成,' + u'已完成' + str(schedule[1]) + u'部分,' + u'完成度:' + '%.2f%%' %
          (schedule[1] / schedule[0] * 100) + u'!')
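
kmeans.getClusterAssment appears in several of the examples above: given the stacked feature rows (for instance the flattened covariance matrices) and the centroids, it returns per row the index of the nearest centroid, which is later read back as clusterAssment[i, 0]. A plain-NumPy sketch of such a nearest-centroid assignment, offered as an assumption rather than the repository's implementation (assign_to_centroids is an illustrative name):

import numpy as np

def assign_to_centroids(rows, centroids):
    # rows: (n, d) stacked feature vectors; centroids: (k, d) cluster centers.
    # Returns an (n, 2) array of [nearest centroid index, squared distance].
    dists = np.linalg.norm(rows[:, None, :] - centroids[None, :, :], axis=2)
    idx = np.argmin(dists, axis=1)
    return np.column_stack((idx, dists[np.arange(len(rows)), idx] ** 2))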