    import keras.backend.tensorflow_backend as KTF
    import tensorflow as tf
    import numpy as np
    import pandas as pd
    from base import traversalDir_FirstDir
    from get_data import z_norm

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # do not grab all GPU memory up front; allocate on demand
    sess = tf.Session(config=config)
    KTF.set_session(sess)
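    # Note: on TensorFlow 2.x (an assumed alternative; the code above targets
    # TF 1.x with standalone Keras) the same grow-as-needed behaviour is:
    #   for gpu in tf.config.list_physical_devices("GPU"):
    #       tf.config.experimental.set_memory_growth(gpu, True)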

    # Select a suitable threshold for each attribute and store it
    # Read the data and standardise it
    diying_attribute = list(pd.read_csv("./parameter/diying.csv", header=None)[0])
    for i in range(len(diying_attribute)):
        diying_attribute[i] = "BX0101_" + diying_attribute[i]
    mean_and_std = pd.read_csv("./data/Interval_Mean_Std.csv", index_col="Label")
    attribute_N = pd.read_csv("./data/attribute_N.csv", index_col="Label")
    read_path = "./data/test_set"
    read_file_list = traversalDir_FirstDir(read_path)

    # Ground-truth labels for the test data
    sources = np.zeros((142276, 24))
    for i in range(len(read_file_list)):
        data = pd.read_csv(read_file_list[i], engine="python")["Class"]
        sources[:, i] = data.values
    sources = np.sum(sources, axis=1)
    source_label = [1 if source > 0 else 0 for source in sources]

    # Run predictions on the data and derive the predicted labels
    aims = np.zeros((142276, 24))
    for file in read_file_list:
        data = pd.read_csv(file, engine="python", index_col="Time")
        data_copy = data.iloc[:, 0:1].copy()
        data.iloc[:, 0:1] = z_norm(data_copy)
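        # Hypothetical continuation of the loop (the scoring model and the
        # per-attribute threshold are assumed names, not part of this excerpt):
        #   pred = model.predict(data.values)
        #   error = np.abs(pred - data.values[:, 0])
        #   aims[:, read_file_list.index(file)] = (error > threshold).astype(int)
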
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn import svm
from base import traversalDir_FirstDir, merge
from get_data import z_norm
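# `z_norm` (from get_data) is assumed here to standardise each column to zero
# mean and unit variance, i.e. (x - mean) / std; `merge` is assumed to read
# and combine the listed CSV files into a single DataFrame.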

if __name__ == "__main__":

    # Read the earth-shadow (diying) attribute list
    diying_attribute = list(
        pd.read_csv("./parameter/diying.csv", header=None)[0])
    for i in range(len(diying_attribute)):
        diying_attribute[i] = "BX0101_" + diying_attribute[i]

    # Read the training-set data
    read_path_train = "./data/train"
    file_path_list = traversalDir_FirstDir(read_path_train)
    train_data = merge(file_path_list)[diying_attribute].copy()
    train_data["Class"] = 0  # all training samples are treated as normal (label 0)

    # Read the validation-set data
    read_path_val = "./data/val"
    file_path_list = traversalDir_FirstDir(read_path_val)
    val_data = merge(file_path_list)[diying_attribute]

    # Ground-truth labels for the validation data
    read_path_val = "./data/val_set"
    file_path_list = traversalDir_FirstDir(read_path_val)
    sources = np.zeros((141017, 24))
    for i in range(len(file_path_list)):
        data = pd.read_csv(file_path_list[i], engine="python")["Class"]
        sources[:, i] = data.values
    sources = np.sum(sources, axis=1)
    source_label = [1 if source > 0 else 0 for source in sources]
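
    # A minimal sketch of how the imported estimators could be applied from
    # here (an assumption about the next step, not the original pipeline):
    # fit a one-class SVM on the normal training samples, score the
    # validation samples, and compare against `source_label`; KMeans could be
    # substituted to cluster the attributes instead.
    clf = svm.OneClassSVM(kernel="rbf", nu=0.05, gamma="scale")
    clf.fit(train_data[diying_attribute].values)
    # OneClassSVM predicts +1 for inliers and -1 for outliers; map to 0/1 so
    # it lines up with `source_label` (1 = anomalous). Assumes val_data and
    # source_label are aligned row-for-row.
    val_pred = (clf.predict(val_data.values) == -1).astype(int)
    print("validation accuracy: %.4f" % np.mean(val_pred == np.array(source_label)))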