Example #1
# Assumed context: LoadFile is a project utility; the imports below are the
# standard ones this function needs.
from collections import Counter

import numpy as np
import pandas as pd


def making2(p, argchoice_r):
    '''
    Build a sub-dataset from the samples whose radii were selected.
    :param p: dataset path, str
    :param argchoice_r: list of radius indices to keep, list
    :return: the sub-dataset
    '''
    dataset = LoadFile(p)
    dataset_pd = pd.DataFrame(
        data=dataset,
        columns=['f' + '%s' % i
                 for i in range(dataset.shape[-1] - 1)] + ['label'])
    rs = dataset[:, -1]
    rs_dict = Counter(rs)
    rs_list = sorted(rs_dict.keys())
    choice_r = [r for i, r in enumerate(rs_list) if i in argchoice_r]
    # Find the largest sample count among the selected radii
    # (renamed from `max`, which shadowed the builtin).
    max_count = 0
    for r in choice_r:
        data_subpd = dataset_pd.loc[dataset_pd['label'] == r]
        if data_subpd.values.shape[0] > max_count:
            max_count = data_subpd.values.shape[0]
    data_all = np.array([0])
    data_array_con = np.array([0])
    for r in choice_r:
        data_subpd = dataset_pd.loc[dataset_pd['label'] == r]
        # print(data_subpd.values.shape)
        # Number of copies needed so this radius reaches max_count rows.
        num = max_count // data_subpd.values.shape[0]
        num = num if max_count % data_subpd.values.shape[0] == 0 else num + 1
        data_array = data_subpd.values
        np.random.shuffle(data_array)
        # Stack num shuffled copies (the original reused the outer loop
        # variable `i` here).
        for _ in range(num):
            if data_array_con.any() == 0:
                data_array_con = data_array
            else:
                data_array_con = np.vstack((data_array_con, data_array))
        print(data_array_con.shape, max_count)
        # Keep exactly max_count rows per radius and stack into the result.
        data_all = (data_array_con[:max_count, :] if data_all.any() == 0 else
                    np.vstack((data_all, data_array_con[:max_count, :])))
        data_array_con = np.array([0])
    return data_all
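
making2 balances the selected radii by tiling each class's rows until it
matches the largest one, then truncating. A minimal self-contained sketch of
the same oversampling idea (all names here are illustrative, not project code):

import numpy as np

def balance_by_oversampling(data, labels, keep):
    # Oversample each kept label up to the size of the largest kept label.
    max_count = max(int(np.sum(labels == l)) for l in keep)
    parts = []
    for l in keep:
        rows = data[labels == l]
        reps = int(np.ceil(max_count / rows.shape[0]))
        parts.append(np.tile(rows, (reps, 1))[:max_count])
    return np.vstack(parts)

demo = np.arange(12, dtype=np.float32).reshape(6, 2)
demo_labels = np.array([0, 0, 0, 1, 1, 2])
print(balance_by_oversampling(demo, demo_labels, keep=[0, 1, 2]).shape)  # (9, 2)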
Example #2
# Assumed context: TensorFlow 1.x (tf.placeholder / tf.Session); LoadFile,
# onehot, spliting, layers and the batching generator `input` are project
# modules.
def session(dataset_path, train_path='', test_path=''):
    '''
    Wire up the graph nodes and run the session.
    :param dataset_path: dataset path
    :param train_path: training-set path, empty by default
    :param test_path: test-set path, empty by default
    :return: None
    '''
    # Load the dataset.
    dataset = LoadFile(p=dataset_path)
    # dataset = guiyi(dataset)
    dataset = onehot(dataset)

    g = tf.Graph()
    with g.as_default():
        with tf.name_scope('placeholder'):
            x_f = tf.placeholder(dtype=tf.float32, shape=[None, 4], name='x_f')
            x_l = tf.placeholder(dtype=tf.float32,
                                 shape=[None, 100],
                                 name='x_l')
            y = tf.placeholder(dtype=tf.float32, shape=[None, 3], name='y')
            learning_rate = tf.placeholder(dtype=tf.float32, name='lr')
            is_training = tf.placeholder(dtype=tf.bool, name='is_training')
        output = layers(x_f=x_f, x_l=x_l, is_training=is_training)
        with tf.name_scope('prediction'):
            # loss = -tf.reduce_mean(y * tf.log(output), name='loss')
            loss = tf.reduce_mean(
                tf.keras.losses.categorical_crossentropy(y_true=y,
                                                         y_pred=output))
            opt = tf.train.GradientDescentOptimizer(
                learning_rate=learning_rate).minimize(loss)
            acc = tf.reduce_mean(tf.cast(
                tf.equal(tf.keras.backend.argmax(output, axis=1),
                         tf.keras.backend.argmax(y, axis=1)), tf.float32),
                                 name='pred')
        with tf.name_scope('etc'):
            init = tf.global_variables_initializer()
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options),
                    graph=g) as sess:
        sess.run(init)
        # Split into training and test sets.
        train_data, test_data = spliting(dataset, 3000)
        for i in range(6000):  #6000
            for data in input(dataset=train_data, batch_size=500):
                _ = sess.run(opt,
                             feed_dict={
                                 x_f: data[:, :4],
                                 x_l: data[:, 4:-3],
                                 y: data[:, -3:],
                                 learning_rate: 1e-2,
                                 is_training: False
                             })
                if i % 100 == 0:
                    loss_ = sess.run(loss,
                                     feed_dict={
                                         x_f: data[:, :4],
                                         x_l: data[:, 4:-3],
                                         y: data[:, -3:],
                                         is_training: False
                                     })
                    acc_1 = sess.run(acc,
                                     feed_dict={
                                         x_f: data[:, :4],
                                         x_l: data[:, 4:-3],
                                         y: data[:, -3:],
                                         is_training: False
                                     })
            if i % 100 == 0:
                acc_2 = sess.run(acc,
                                 feed_dict={
                                     x_f: test_data[:, :4],
                                     x_l: test_data[:, 4:-3],
                                     y: test_data[:, -3:],
                                     is_training: False
                                 })
                print('Epoch %s  train loss: %s  train acc: %s  test acc: %s'
                      % (i, loss_, acc_1, acc_2))

        tf.summary.FileWriter('log/first_graph', sess.graph)
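
The batching helper `input` used above is project code (and shadows Python's
builtin); a minimal generator with the same calling convention, its behaviour
assumed from the training loop, might look like:

def input(dataset, batch_size):
    # Yield successive row batches of `dataset` (assumed semantics).
    for start in range(0, dataset.shape[0], batch_size):
        yield dataset[start:start + batch_size]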
Example #3
    # Fragment: continuation of check(); rows the first-stage classifier
    # labelled as class 2 are handled here.
    input_secondc2 = input_1.loc[input_1['%s' % input.shape[-1]] == 2]
    input_secondc2 = input_secondc2.drop(columns=['%s' % input.shape[-1]])
    input_secondc2 = input_secondc2.values
    # Feed into the second-stage ('11') classifier for finer classification.
    r_finally3 = np.array(second_check2(input_secondc2))[:, np.newaxis]
    r_finally = np.vstack((r_finally1, r_finally2, r_finally3))
    r_real = np.vstack(
        (input_firstc[:, -1][:, np.newaxis], input_secondc1[:, -1][:,
                                                                   np.newaxis],
         input_secondc2[:, -1][:, np.newaxis]))
    return r_finally, r_real


if __name__ == '__main__':
    p = r'/home/xiaosong/桌面/pny相关数据/data_pny/PNY_all.pickle'
    input = LoadFile(p=p)
    np.random.shuffle(input)
    dataset_4feature = input[:, :4]
    dataset_dense = input[:, 4:-1]
    label = input[:, -1][:, np.newaxis]
    dataset_fft = fft_transformer(dataset_dense, 100)
    dataset = np.hstack((dataset_4feature, dataset_fft, label))
    dataset_guiyi_2 = guiyi(dataset)
    print(dataset_guiyi_2.shape)
    r_finally, r_real = check(input=dataset_guiyi_2[:5100, :])
    # A prediction counts as correct when within 1e-2 of the true value.
    r_1 = np.where(np.abs(r_finally - r_real) < 1e-2, 1, 0)
    r_sum = np.sum(r_1)
    acc = r_sum / r_1.shape[0]
Example #4
# Assumed context: TensorFlow 1.x; LoadFile, spliting, cnnlstm_regression,
# acc_regression and the batching generator `input` are project modules.
def session(dataset_path, train_path='', test_path=''):
    '''
    Wire up the graph nodes and run the session.
    :param dataset_path: dataset path
    :param train_path: training-set path, empty by default
    :param test_path: test-set path, empty by default
    :return: None
    '''
    # Load the dataset.
    dataset = LoadFile(p=dataset_path)
    g1 = tf.Graph()
    with g1.as_default():
        with tf.name_scope('placeholder'):
            x_f = tf.placeholder(dtype=tf.float32, shape=[None, 4], name='x_f')
            x_l = tf.placeholder(dtype=tf.float32,
                                 shape=[None, 100],
                                 name='x_l')
            y = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='y')
            learning_rate = tf.placeholder(dtype=tf.float32, name='lr')
            is_training = tf.placeholder(dtype=tf.bool, name='is_training')
        # output = res_regression(x_f=x_f, x_l=x_l, is_training=is_training)
        output = cnnlstm_regression(x_f=x_f, x_l=x_l, is_training=is_training)
        with tf.name_scope('prediction'):
            loss = tf.reduce_mean(tf.square(output - y))
            opt = tf.train.GradientDescentOptimizer(
                learning_rate=learning_rate).minimize(loss)
            # opt = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
            acc = acc_regression(Threshold=0.1, y_true=y, y_pred=output)
        with tf.name_scope('etc'):
            init = tf.global_variables_initializer()
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options),
                    graph=g1) as sess:
        sess.run(init)
        # Split into training and test sets.
        train_data, test_data = spliting(dataset, 2269)
        for i in range(2000):  #20000
            for data in input(dataset=train_data, batch_size=1000):
                _ = sess.run(opt,
                             feed_dict={
                                 x_f: data[:, :4],
                                 x_l: data[:, 4:-1],
                                 y: data[:, -1][:, np.newaxis],
                                 learning_rate: 1e-2,
                                 is_training: False
                             })
                if i % 100 == 0:
                    loss_ = sess.run(loss,
                                     feed_dict={
                                         x_f: data[:, :4],
                                         x_l: data[:, 4:-1],
                                         y: data[:, -1][:, np.newaxis],
                                         is_training: False
                                     })
                    acc_1 = sess.run(acc,
                                     feed_dict={
                                         x_f: data[:, :4],
                                         x_l: data[:, 4:-1],
                                         y: data[:, -1][:, np.newaxis],
                                         is_training: False
                                     })
            if i % 100 == 0:
                acc_2 = sess.run(acc,
                                 feed_dict={
                                     x_f: test_data[:, :4],
                                     x_l: test_data[:, 4:-1],
                                     y: test_data[:, -1][:, np.newaxis],
                                     is_training: False
                                 })
                print('Epoch %s  train loss: %s  train acc: %s  test acc: %s'
                      % (i, loss_, acc_1, acc_2))
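
acc_regression is project code; from its use here it appears to count a
regression prediction as correct when it lies within Threshold of the target.
A minimal TensorFlow 1.x sketch of that assumed semantics:

import tensorflow as tf

def acc_regression(Threshold, y_true, y_pred):
    # Fraction of predictions within Threshold of the target (assumed).
    hits = tf.cast(tf.abs(y_true - y_pred) < Threshold, tf.float32)
    return tf.reduce_mean(hits)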
Example #5
# Assumed context: TensorFlow 1.x; LoadFile, spliting, cnnlstm_regression,
# acc_regression, SaveImport_model and the batching generator `input` are
# project modules.
def session(dataset_path, train_path='', test_path=''):
    '''
    Wire up the graph nodes and run the session.
    :param dataset_path: dataset path
    :param train_path: training-set path, empty by default
    :param test_path: test-set path, empty by default
    :return: None
    '''
    # Load the dataset.
    dataset = LoadFile(p=dataset_path)
    g1 = tf.Graph()
    with g1.as_default():
        with tf.name_scope('placeholder'):
            x_f = tf.placeholder(dtype=tf.float32, shape=[None, 4], name='x_f')
            x_l = tf.placeholder(dtype=tf.float32,
                                 shape=[None, 100],
                                 name='x_l')
            y = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='y')
            learning_rate = tf.placeholder(dtype=tf.float32, name='lr')
            is_training = tf.placeholder(dtype=tf.bool, name='is_training')
        # output = res_regression(x_f=x_f, x_l=x_l, is_training=is_training)
        output = cnnlstm_regression(x_f=x_f, x_l=x_l, is_training=is_training)
        with tf.name_scope('prediction'):
            loss = tf.reduce_mean(tf.square(output - y))
            opt = tf.train.GradientDescentOptimizer(
                learning_rate=learning_rate).minimize(loss)
            # opt = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
            acc1 = acc_regression(Threshold=0.1, y_true=y, y_pred=output)
            acc2 = acc_regression(Threshold=0.16, y_true=y, y_pred=output)
            acc3 = acc_regression(Threshold=0.2, y_true=y, y_pred=output)
            acc4 = acc_regression(Threshold=0.3, y_true=y, y_pred=output)
        with tf.name_scope('etc'):
            init = tf.global_variables_initializer()
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options),
                    graph=g1) as sess:
        sess.run(init)
        # Split into training and test sets.
        train_data, test_data = spliting(dataset, 3369)
        for i in range(60000):  #20000
            for data in input(dataset=train_data, batch_size=1000):
                _ = sess.run(opt,
                             feed_dict={
                                 x_f: data[:, :4],
                                 x_l: data[:, 4:-1],
                                 y: data[:, -1][:, np.newaxis],
                                 learning_rate: 1e-2,
                                 is_training: False
                             })
                if i % 100 == 0:
                    loss_ = sess.run(loss,
                                     feed_dict={
                                         x_f: data[:, :4],
                                         x_l: data[:, 4:-1],
                                         y: data[:, -1][:, np.newaxis],
                                         is_training: False
                                     })
                    acc_1 = sess.run(acc1,
                                     feed_dict={
                                         x_f: data[:, :4],
                                         x_l: data[:, 4:-1],
                                         y: data[:, -1][:, np.newaxis],
                                         is_training: False
                                     })
                    acc_2 = sess.run(acc2,
                                     feed_dict={
                                         x_f: data[:, :4],
                                         x_l: data[:, 4:-1],
                                         y: data[:, -1][:, np.newaxis],
                                         is_training: False
                                     })
                    acc_3 = sess.run(acc3,
                                     feed_dict={
                                         x_f: data[:, :4],
                                         x_l: data[:, 4:-1],
                                         y: data[:, -1][:, np.newaxis],
                                         is_training: False
                                     })
                    acc_4 = sess.run(acc4,
                                     feed_dict={
                                         x_f: data[:, :4],
                                         x_l: data[:, 4:-1],
                                         y: data[:, -1][:, np.newaxis],
                                         is_training: False
                                     })
            if i % 100 == 0:
                acc_5 = sess.run(acc1,
                                 feed_dict={
                                     x_f: test_data[:, :4],
                                     x_l: test_data[:, 4:-1],
                                     y: test_data[:, -1][:, np.newaxis],
                                     is_training: False
                                 })
                acc_6 = sess.run(acc2,
                                 feed_dict={
                                     x_f: test_data[:, :4],
                                     x_l: test_data[:, 4:-1],
                                     y: test_data[:, -1][:, np.newaxis],
                                     is_training: False
                                 })
                acc_7 = sess.run(acc3,
                                 feed_dict={
                                     x_f: test_data[:, :4],
                                     x_l: test_data[:, 4:-1],
                                     y: test_data[:, -1][:, np.newaxis],
                                     is_training: False
                                 })
                acc_8 = sess.run(acc4,
                                 feed_dict={
                                     x_f: test_data[:, :4],
                                     x_l: test_data[:, 4:-1],
                                     y: test_data[:, -1][:, np.newaxis],
                                     is_training: False
                                 })
                print(
                    'Epoch %s  train loss: %s  train acc: %s:%s %s:%s %s:%s'
                    ' %s:%s  test acc: %s:%s %s:%s %s:%s %s:%s'
                    % (i, loss_, 0.1, acc_1, 0.16, acc_2, 0.2, acc_3, 0.3,
                       acc_4, 0.1, acc_5, 0.16, acc_6, 0.2, acc_7, 0.3, acc_8))
        tf.summary.FileWriter('log/regression_cnnlstm', sess.graph)
        # Save the model in pb format under the given path.
        saving_model = SaveImport_model(
            sess_ori=sess,
            file_suffix=r'/cnnlstm',
            ops=(output, x_f, x_l, is_training),
            usefulplaceholder_count=4,
            pb_file_path=r'/home/xiaosong/桌面/regression_cnnlstm')
        saving_model.save_pb()
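
SaveImport_model is a project-specific wrapper. With plain TensorFlow 1.x APIs
the same freeze-and-save step could be sketched as below (the helper name and
node names are illustrative, not the wrapper's real interface):

import tensorflow as tf

def freeze_to_pb(sess, output_node_names, logdir, fname):
    # Fold variables into constants, then write a binary GraphDef (.pb).
    frozen = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), output_node_names)
    tf.train.write_graph(frozen, logdir, fname, as_text=False)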
Example #6

def fft_transformer(dataset, N):
    '''
    Apply an N-point FFT to each row of the matrix.
    :param dataset: matrix to transform
    :param N: number of FFT points after the transform
    :return: matrix of FFT magnitudes
    '''
    fft_abs = np.abs(np.fft.fft(a=dataset, n=N, axis=1))
    return fft_abs
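
A quick sanity check of fft_transformer: rows shorter than N are zero-padded,
so 64-sample signals transformed with N=100 yield 100 magnitude values each
(the demo data below is illustrative):

import numpy as np

demo = np.random.rand(5, 64)              # 5 signals, 64 samples each
print(fft_transformer(demo, 100).shape)   # -> (5, 100)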


if __name__ == '__main__':
    p = r'/home/xiaosong/桌面/OLDENBURG_all.pickle'
    dataset = LoadFile(p)
    nums_cl = [[6557, 0], [611, 2], [101, 2], [13, 2], [554, 2], [155, 2],
               [100, 2], [1165, 1], [1993, 1], [947, 2], [1133, 2], [1152, 1],
               [542, 2], [754, 2], [2163, 1]]
    dataset_output = making(nums_cl=nums_cl, dataset=dataset)
    print(dataset_output.shape)
    checkclassifier(dataset_output[:, -1])
    # SaveFile(dataset_output, savepickle_p=r'/home/xiaosong/桌面/OLDENBURG_3cl.pickle')
    dataset_4feature = dataset_output[:, :4]
    dataset_dense = dataset_output[:, 4:-1]
    label = dataset_output[:, -1][:, np.newaxis]