def __init__(self, config):
    self.config = config
    # load data here
    self.h5_reader = H5DataReader(config.h5_data_path,
                                  mode='r',
                                  data_key=config.h5_data_key,
                                  label_key=config.h5_label_key,
                                  seed=config.h5_shuffle_seed)
    if config.get("h5_condition_args"):
        self.h5_reader.set_condition_idx(*config.h5_condition_args)
    self.train_batch_generator = None
    self.test_batch_generator = None
    self.batch_generator = None
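The three generator attributes are left as None here; a minimal sketch of how they might be populated later, assuming only the get_shuffle_data(batch_size) method that the examples below actually call (the helper name and the endless-generator design are assumptions, not part of the original class):

def make_batch_generator(reader, batch_size):
    # Hypothetical helper: wrap H5DataReader.get_shuffle_data in an endless
    # generator so training code can simply call next(...) on it.
    while True:
        batch_x, batch_y = reader.get_shuffle_data(batch_size)
        yield batch_x, batch_y

# Assumed usage: self.train_batch_generator = make_batch_generator(self.h5_reader, batch_size)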
# signal length
sig_len = 10000
# ratio of training samples to test samples
train_test_rate = 4
# training mode
mode = 'f'
acc_rate = 0.90  # target accuracy
batch_size = 20
LR = 1e-4
labels = ['dt1', 'dt2', 'dt3', 'dt4', 'dt5', 'dt6', 'dt7', 'dt8', 'dt9']
tick_marks = np.array(range(len(labels))) + 0.5
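The +0.5 offset in tick_marks places ticks on the cell boundaries rather than the cell centers, which is a common trick for drawing a grid around each cell of a confusion-matrix plot. A minimal sketch under that assumption (the helper below is illustrative, not part of the original script; only labels and tick_marks come from the constants above):

import matplotlib.pyplot as plt

def draw_confusion_matrix(cm, save_path):
    # Illustrative only: show the confusion matrix with class labels and a
    # minor-tick grid on the cell boundaries defined by tick_marks.
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.xticks(range(len(labels)), labels, rotation=90)
    plt.yticks(range(len(labels)), labels)
    plt.gca().set_xticks(tick_marks, minor=True)
    plt.gca().set_yticks(tick_marks, minor=True)
    plt.grid(True, which='minor', linestyle='-')
    plt.xlabel('Predicted label')
    plt.ylabel('True label')
    plt.savefig(save_path)
    plt.clf()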

row = 2000

data = H5DataReader(dir_path, 'r', 'signals', 'labels')

kind_num = 9
shape1 = 128
shape2 = 128

x = tf.placeholder(tf.float32,
                   shape=(None, shape1, shape2, 4),
                   name='x_features')
y_ = tf.placeholder(tf.int64, shape=[None, kind_num], name='y_')
keep_prob = tf.placeholder("float", name='keep_prob')
training = tf.placeholder(tf.bool, name='training')

n_batch = len(data.train_indices) // batch_size

# prelogits, end_points = network_incep.inference(x, keep_prob,
Example #3
        ckpt = tf.train.get_checkpoint_state(model_path)
        load_model.load_model(model_path)
        # files = os.listdir(model_path)
        # meta_file = [s for s in files if s.endswith('.meta')][0]
        # saver = tf.train.import_meta_graph(os.path.join(model_path, meta_file))

        # Get input and output tensors
        input = tf.get_default_graph().get_tensor_by_name("input:0")
        is_training = tf.get_default_graph().get_tensor_by_name("is_training:0")
        decode = tf.get_default_graph().get_tensor_by_name("decode:0")
        # restore the model
        # saver.restore(sess, ckpt.model_checkpoint_path)
        loss = tf.get_default_graph().get_tensor_by_name("loss/bce:0")

        # stft_reader = H5DataReader(stft_path,)
        sig_reader = H5DataReader(signal_path, 'r', 'signals', seed=303)
        # sig_reader = H5DataReader(stft_path, 'r', seed=30)
        batch, _ = sig_reader.get_shuffle_data(20)
        batch = np.asarray([signal_regulation(x) for x in batch])
        decode_arr, los_array = sess.run([decode, loss],
                                         feed_dict={input: batch, is_training: False})
        diff = np.abs(decode_arr-batch)
        for i in range(len(decode_arr)):
            save_sig_fig(diff[i, :1000, 0], '../figures/sv2_diff_{:d}'.format(i))
            save_sig_fig(batch[i, :1000, 0], '../figures/sv2_input_{:d}'.format(i))
            save_sig_fig(decode_arr[i, :1000, 0], '../figures/sv2_output_{:d}'.format(i))

        # np.exp(decode_arr) / sum(np.exp(decode_arr))
        # plot(np.decode_arr, 0)
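Example #3 calls signal_regulation and save_sig_fig without defining them; a minimal sketch of plausible implementations follows (only the call signatures are taken from the example, the bodies are assumptions):

import numpy as np
import matplotlib.pyplot as plt

def signal_regulation(sig):
    # Assumed preprocessing: zero-mean, peak-normalised signal.
    sig = sig - np.mean(sig, axis=0, keepdims=True)
    return sig / (np.max(np.abs(sig)) + 1e-12)

def save_sig_fig(sig, path):
    # Assumed helper: plot a 1-D trace and save it as <path>.png.
    plt.figure(figsize=(20, 6))
    plt.plot(sig, linewidth=1)
    plt.savefig(path + '.png')
    plt.close()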

Example #4
            net = tf.layers.dense(flatten, 1024, activation='relu')
            logits = tf.layers.dense(net, N)
            softmax = tf.nn.softmax(logits)

        loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
        predict = tf.argmax(softmax, -1)
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(predict, labels), tf.float32))
        train_varlist = tf.trainable_variables("Finetune")
        train_op = tf.train.AdamOptimizer(init_lr).minimize(
            loss, var_list=train_varlist, global_step=finetune_step)
        # begin training
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, ckpt)

        h5_reader = H5DataReader(stft_path, seed=678)

        i = 0
        los_list = []
        acc_list = []
        while i < iters:
            i += 1
            batch_x, batch_y = h5_reader.get_shuffle_data(batch_size)
            # preprocessing
            batch_x = [myfft1_norm(x) for x in batch_x]
            feed_dict = {is_training: False, input: batch_x, labels: batch_y}
            _, prd, los, acc = sess.run([train_op, predict, loss, accuracy],
                                        feed_dict)
            los_list.append(los)
            acc_list.append(acc)
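            # Sketch of periodic reporting (the print format is an assumption),
            # mirroring the `if i % 20 == 0` block used in the later fragment:
            if i % 20 == 0:
                print('iter {:d}: loss = {:.4f}, acc = {:.4f}'.format(
                    i, float(np.mean(los_list)), float(np.mean(acc_list))))
                los_list, acc_list = [], []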
Example #5
        plt.clf()


def savefig(features, i, name):
    # Plot channel i (first 1000 samples) of each feature and save each one as a separate PNG.
    k = 0
    plt.figure(figsize=(20, 6))
    for feature in features:
        k += 1
        plt.plot(feature[:1000, i], linewidth=1)
        plt.savefig(name + str(k) + '.png')
        plt.clf()


def plot2d(features, i):
    # Display channel i of each 2-D feature map.
    for feature in features:
        plt.imshow(feature[:, :, i])
        plt.show()
        plt.clf()


if __name__ == '__main__':
    stft_path = '../../dataset/LTE_dataset_stft_256x256x4_3c_1216.h5'
    signal_path = '../../dataset/LTE_origin_3240_dataset_5c_10s_1202.h5'
    print(os.path.abspath(signal_path))
    # stft_reader = H5DataReader(stft_path,)
    # sig_reader = H5DataReader(signal_path, 'r', 'signals')
    sig_reader = H5DataReader(stft_path, 'r')
    batch, _ = sig_reader.get_shuffle_data(1)
    print(batch[0].shape)
    plot2d(batch, 2)
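The smoke test above draws a single shuffled sample from the 256x256x4 STFT dataset and displays its third channel (index 2) with plot2d.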
        with tf.variable_scope("Finetune"):
            flatten = tf.layers.flatten(code)
            net=tf.layers.dense(flatten, 128, activation='relu')
            logits = tf.layers.dense(net,N)
            softmax = tf.nn.softmax(logits)
        loss = tf.losses.sparse_softmax_cross_entropy(labels,logits)
        predict = tf.arg_max(softmax,-1)
        accuracy = tf.reduce_mean(tf.cast(tf.equal(predict,labels),tf.float32))
        train_varlist = tf.trainable_variables("Finetune")
        train_op = tf.train.AdamOptimizer(init_lr).minimize(loss,var_list=train_varlist)

        # begin training
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, ckpt)

        h5_reader = H5DataReader(signal_path, data_key='signals')
        batch_x, batch_y = h5_reader.get_shuffle_data(batch_size)
        feed_dict = {is_training: False, input: batch_x, labels: batch_y}
        i = 0
        los_list = []
        acc_list = []
        while i < iters:
            i += 1
            _, prd, los, acc = sess.run([train_op, predict, loss, accuracy], feed_dict)
            los_list.append(los)
            acc_list.append(acc)


            if i % 20 == 0:
                los = np.mean(los_list)
                acc = np.mean(acc_list)
            softmax = tf.nn.softmax(logits)

        loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
        predict = tf.argmax(softmax, -1)
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(predict, labels), tf.float32))
        train_varlist = tf.trainable_variables("Finetune")
        train_op = tf.train.AdamOptimizer(init_lr).minimize(
            loss, var_list=train_varlist, global_step=finetune_step)
        # begin training
        new_saver = tf.train.Saver(max_to_keep=1)
        sess.run(tf.global_variables_initializer())
        import_saver.restore(sess, ckpt)

        h5_reader = H5DataReader(stft_path,
                                 seg_set_method='txt',
                                 txt_path=train_idx_txt)
        unknown_reader = H5DataReader(stft_path)
        unknown_reader.set_condition_idx(condition_keys=['labels', 'fc'],
                                         include_conditions=[(6, 225),
                                                             (7, 225)],
                                         exclude_conditions=None)
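        # Presumably this restricts unknown_reader to samples whose
        # ('labels', 'fc') pair is (6, 225) or (7, 225), i.e. a held-out
        # "unknown" evaluation set; the exact semantics of set_condition_idx
        # are an assumption here and are not shown in this snippet.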
        i = 0
        los_list = []
        acc_list = []
        e_los_list = []
        e_acc_list = []
        u_los_list = []
        u_acc_list = []
        while i < iters:
            i += 1