Example #1
0
def main():
    """Run the preprocessing pipeline on the recorded signals and evaluate.

    Loads raw data, reshapes it for testing, normalizes it, converts the
    1-D channel vectors to 2-D grids, and hands the result to test().
    """
    raw_data, raw_labels = readFile(backward.filepath)
    shaped_data, shaped_labels = data_reshape_test(raw_data, raw_labels)
    print("signal_re.shape:")
    print(shaped_data.shape)
    print("labels_re.shape:")
    print(shaped_labels.shape)
    flat_data = pre_data_reshape(shaped_data)
    print("re_data.shape:")
    print(flat_data.shape)
    normalized = more_norm_dataset(flat_data)
    grid_data = more_dataset_1Dto2D(normalized)
    print("data_1Dto2D.shape:")
    print(grid_data.shape)
    test(grid_data, shaped_labels)
Example #2
0
def main():
    """Preprocess the recorded signals and pickle the result.

    Runs the same reshape / normalize / 1-D-to-2-D pipeline as the test
    path, then serializes the data and labels to 'CNN_test.pkl'.
    """
    raw_data, raw_labels = readFile(backward.filepath)
    shaped_data, shaped_labels = data_reshape_test(raw_data, raw_labels)
    print("signal_re.shape:")
    print(shaped_data.shape)
    print("labels_re.shape:")
    print(shaped_labels.shape)
    flat_data = pre_data_reshape(shaped_data)
    print("re_data.shape:")
    print(flat_data.shape)
    normalized = more_norm_dataset(flat_data)
    grid_data = more_dataset_1Dto2D(normalized)
    print("data_1Dto2D.shape:")
    print(grid_data.shape)
    payload = {"data": grid_data, "labels": shaped_labels}
    with open('CNN_test.pkl', 'wb') as sink:
        pickle.dump(payload, sink, pickle.HIGHEST_PROTOCOL)
    print("okkkkkkkkkkkkk")
Example #3
0
def main():
    """Load one DEAP subject's .mat file, reshape it, and evaluate with test()."""
    mat_path = 'F:/情感计算/数据集/DEAP/s02.mat'
    data, labels = readFile(mat_path)
    test(*data_reshape_test(data, labels))
Example #4
0
import numpy as np
from input_data import readFile, data_reshape, data_reshape_test, PEOPEL_NUM
# NOTE(review): `tf` is used below but TensorFlow is never imported in this
# snippet — it needs `import tensorflow as tf` (a 1.x version, since
# tf.contrib is used). Also, `PEOPEL_NUM` looks like a typo for PEOPLE_NUM,
# but the name must match what input_data exports — confirm before renaming.

n_steps = 40  # number of time steps (X inputs) per sequence
n_inputs = 8064  # number of scalar values in each X step
n_neurons = 128  # number of RNN units per layer
n_outputs = 2  # number of output classes
n_layers = 4  # number of stacked LSTM layers
# 3/4 of all trials (PEOPEL_NUM subjects * 40 trials each) — presumably the
# training-set size; verify against how data_reshape splits the data.
BATCH_SIZE_ALL = PEOPEL_NUM * 40 // 4 * 3
BATCH_SIZE = 10
n_epochs = 1000
learning_rate_base = 0.001

# Load the DEAP dataset and build the train and test splits.
signal_data, signal_labels = readFile('F:/情感计算/数据集/DEAP/')
signal_re, labels_re = data_reshape(signal_data, signal_labels)
signal_test_re, labels_test_re = data_reshape_test(signal_data, signal_labels)

X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])  # input placeholder
y = tf.placeholder(tf.int32, [None])  # label placeholder

lstm_cells = [
    tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons)
    for layer in range(n_layers)
]  # list of n_layers LSTM cells, each with n_neurons units
multi_cell = tf.contrib.rnn.MultiRNNCell(lstm_cells)  # stack into a multi-layer RNN
outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)
# outputs (tensor): [batch_size, max_time, cell.output_size]
# states: the final state, i.e. the state output by the last cell of the
# sequence. Normally its shape is [batch_size, cell.output_size], but when the
# cell is a BasicLSTMCell it is a pair (c, h) — shape [2, batch_size,
# cell.output_size] — holding the LSTM cell state and hidden state.
# NOTE(review): this SUMS the top layer's cell state (states[-1][0]) and
# hidden state (states[-1][1]); the usual choice is the hidden state alone
# (states[-1][1]) — confirm the summation is intentional.
top_layer_h_state = states[-1][0] + states[-1][1]