from pandas import DataFrame
import numpy as np


def no_correlation(df: DataFrame) -> DataFrame:
    """
    Counts occurrences of the word 'nie' in each row's description,
    stores the count in a new 'no_count' column and returns the
    Pearson correlation matrix of the resulting frame.
    :param df: DataFrame with a 'description' column
    :return: DataFrame correlation matrix
    """
    df['no_count'] = 0

    for i, row in df.iterrows():
        no_count = 0
        desc_list = row.description.split(' ')
        for word in desc_list:
            if word.lower() == 'nie':
                no_count += 1
        df.at[i, 'no_count'] = no_count
    # numeric_only keeps the non-numeric 'description' column from breaking corr()
    return df.corr(method='pearson', numeric_only=True)
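
# --- Usage sketch (added; not part of the original example) ---
# A toy DataFrame with a 'description' column and one numeric column,
# only to illustrate what no_correlation() returns.
toy_df = DataFrame({
    'description': ['nie wiem', 'tak tak', 'nie nie nie', 'moze'],
    'score': [1, 5, 2, 3],
})
print(no_correlation(toy_df))  # correlation matrix including 'no_count'
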

def parametercorr(conv):
    """
    Collapses a convolution weight tensor to a 2-D map of absolute sums,
    computes the Pearson correlation of its columns, and marks columns
    whose correlation with an earlier kept column exceeds 0.85.
    Returns the data as a nested list and the number of marked columns.
    """
    # collapse all leading dimensions so only the last two remain
    re_conv = np.reshape(conv, (-1, conv.shape[-2], conv.shape[-1]))
    re_conv = np.abs(re_conv)
    re_conv = np.sum(re_conv, axis=0)
    data = DataFrame(re_conv)
    relation = data.corr()
    length = relation.shape[0]
    final_cols = []  # representative columns that are kept
    del_cols = []    # columns correlated (> 0.85) with a kept column
    for i in range(length):
        if relation.columns[i] not in del_cols:
            final_cols.append(relation.columns[i])
            for j in range(i + 1, length):
                if (relation.iloc[i, j] > 0.85) and (relation.columns[j]
                                                     not in del_cols):
                    del_cols.append(relation.columns[j])
    # zero out the kept columns before converting back to a plain list
    for col in final_cols:
        data[col] = 0
    nddata = np.array(data)
    data_list = nddata.tolist()
    return data_list, len(del_cols)
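
# --- Usage sketch (added; not part of the original example) ---
# A random 4-D tensor stands in for real convolution weights, purely to
# show the shape of parametercorr()'s output.
demo_conv = np.random.rand(4, 3, 8, 8)
demo_data, n_dropped = parametercorr(demo_conv)
print(len(demo_data), len(demo_data[0]), n_dropped)
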
Example #3
print(count_violations, count_violations.sum(), np.array(result_list).shape[0])

criticality = 0
for i in range(len(count_violations)):
    criticality += count_violations[i] * (127 - i) / 127

print(criticality / count_violations.sum())
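
# --- Added note (not part of the original example) ---
# Vectorised equivalent of the criticality loop above, assuming
# count_violations is a 1-D numpy array of length 128:
weights = (127 - np.arange(len(count_violations))) / 127
print((count_violations * weights).sum() / count_violations.sum())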

category = 0
for i in range(len(count_violations)):
    if count_violations[i] > 0:
        category += 1

print(category, category / 128)

sns.set_style("darkgrid")
data_result = DataFrame(result_list)
data_result.rename(columns={
    0: 'a',
    1: 'b',
    2: 'c',
    3: 'd',
    4: 'e',
    5: 'f',
    6: 'g'
},
                   inplace=True)  # note: the keys 0 and 1 here are integers, not strings
data_result.dropna(axis=0, how='any', inplace=True)  # dropna returns a copy unless inplace=True

print(data_result.corr())
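
# --- Added sketch (not part of the original example) ---
# Quick visualisation of the correlation matrix printed above; assumes
# seaborn is imported as sns and matplotlib.pyplot as plt, as the
# surrounding code suggests.
sns.heatmap(data_result.corr(), annot=True, cmap='coolwarm')
plt.show()
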
sampledata = []  # sample set: feature vectors plus a trailing label column
for i in range(len(value)):
    for j in range(length[i]):
        for z in range(8, 9):  # only the channel-8 signal is used
            xinhao = value[name[i]][str(j)][z].tolist()  # take the channel signal
            te1 = tezhengliangtiqu.tezhengti(xinhao)  # feature extraction for this channel's signal
            te1 = np.append(te1, int(i))  # i is the label; 0-4 corresponds to name
            if not len(sampledata):
                sampledata = [te1.tolist()]
            else:
                sampledata = np.concatenate((sampledata, [te1.tolist()]), axis=0)

# correlation computation
from pandas import DataFrame
samp = DataFrame(sampledata[:, 0:-1])  # features only; the last column is the label
print(samp.corr())
samp.corr().to_excel('test1.xlsx', index=True, header=False)

markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
i = 2
for idx, c1 in enumerate(np.unique(sampledata[:, -1])):  # one curve per class label
    tezheng1 = sampledata[sampledata[:, -1] == c1, 0]
    plt.plot(range(len(tezheng1)), tezheng1, alpha=0.8, c=colors[idx], marker=markers[idx], label=c1)
plt.legend(['normal', 'dianshi', 'mosun', 'duanchi'], loc='best')
plt.show()

plt.scatter(range(len(sampledata)), sampledata[:, 0])

svmxunlian.svmxunlian(sampledata, length)
            
Example #5
    'Kill_event': atd.team_kill_event,
    #'All_event': atd.team_event,
    #'Dx':atd.dxdy[0],
    #'Dy':atd.dxdy[1],
    #'C':C_data,
    'Max_eig': graph_max_eig_data,
    'Short_path': short_path_data,
    #'Ac':Ac_data,
    'Degree': degree_data,
    'Closeness': closeness_data,
    #'Betweenness':betweenness_data,
    #'Eigenvector':eigenvector_data,
}
data = DataFrame(pandata)  # convert the dict into a DataFrame
# end of correlation analysis
corr_data = data.corr()  # method can be 'pearson', 'kendall' or 'spearman'
print("Table 5 (note: only a few selected columns are needed, not all of them)")
print(corr_data)
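
# --- Added sketch (not part of the original example) ---
# As the print above suggests, only a few columns may be needed for the
# table; this selection is an arbitrary illustration using keys defined
# in pandata above.
print(corr_data[['Kill_event', 'Max_eig', 'Short_path']])
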
# small-world network index
ws = nx.watts_strogatz_graph(50, 5, 0.5)
A = np.array(nx.adjacency_matrix(ws).todense())  # the corresponding adjacency matrix
L_rand = nx.average_shortest_path_length(ws, weight="weight")
C_rand = C(A)
# L_avg = pandata['Short_path'][team_P]
zo = ad2zo(Hc_team_P)  # 0-1 matrix
zgraph = zo2graph(team_use_hero[team_P], zo)  # 0-1 graph
L_avg = avg_short_path_length(zgraph)
# C_avg = pandata['C'][team_P]
C_avg = C(zo)
s = (C_avg / C_rand) / (L_avg / L_rand)
print("表一协作网络平均路径长度:" + str(L_avg) + "聚集系数:" + str(C_avg) + "\n")
Example #6
    plt.close()

    for col in columns_name:
        df[col].plot.box()
        plt.savefig("outs\\boxes\\{}.png".format(col))
        plt.close()

    for col in columns_name:
        result = sql_manager.crs.execute((
            "select distinct {},count({}) from information group by {}".format(
                col, col, col))).fetchall()
        counts = [x[1] for x in result]
        attr = [x[0] for x in result]
        fig1, ax1 = plt.subplots()
        ax1.pie(counts,
                labels=attr,
                autopct='%1.1f%%',
                shadow=True,
                startangle=90)
        ax1.axis('equal')  # equal aspect ratio ensures that pie is drawn as a circle

        plt.savefig("outs\\pie_plots\\{}.png".format(col))
        plt.close()

    corrMatrix = df.corr()

    sn.heatmap(corrMatrix, annot=True)
    plt.show()
    # plt.savefig("outs\\corr_table.pdf",dpi=400)
    plt.close()
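
    # --- Added sketch (not part of the original example) ---
    # Lists strongly correlated column pairs from corrMatrix as text, to
    # complement the heatmap; the 0.85 threshold is an arbitrary choice.
    cols = list(corrMatrix.columns)
    for i in range(len(cols)):
        for j in range(i + 1, len(cols)):
            if abs(corrMatrix.iloc[i, j]) > 0.85:
                print(cols[i], cols[j], corrMatrix.iloc[i, j])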