Example #1
def main():
    # Read the raw file data and labels (filepath is assumed to be defined elsewhere in the module)
    print("-------Reading raw data and labels--------")
    signal_data, signal_labels = readFile(filepath)
    # Reshape the data and labels
    print("-----Reshaping data and labels------")
    signal_re, labels_re = data_reshape(signal_data, signal_labels)
    re_data = pre_data_reshape(signal_re)
    # Apply Z-score normalization to the data
    print("----------Applying Z-score----------")
    z_score_data = more_norm_dataset(re_data)
    # Convert the data from 1D to 2D
    print("--------Converting 1D -> 2D--------")
    data_1Dto2D = more_dataset_1Dto2D(z_score_data)
    print(data_1Dto2D.shape)
    dict_data = {"data": data_1Dto2D, "labels": labels_re}
    with open('CNN_train.pkl', 'wb') as f:
        pickle.dump(dict_data, f, pickle.HIGHEST_PROTOCOL)
    # Load the .pkl file back (kept for reference)
    # with open('CNN_train.pkl', 'rb') as f:
    #     data =  pickle.load(f)
    # data_1Dto2D = data['data']
    # labels_re = data['labels']
    # Start CNN training
    print("-----------Starting CNN training-----------")
    backward(data_1Dto2D, labels_re)
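more_norm_dataset and more_dataset_1Dto2D come from the project's own preprocessing module and are not shown in these examples. As a rough illustration of the Z-score step, a per-channel standardization could be sketched as below; the function name and the choice of axis are assumptions, not this project's actual implementation.

import numpy as np

def z_score_normalize(data):
    # Sketch only: data is assumed to be (trials, channels, samples).
    mean = data.mean(axis=-1, keepdims=True)  # mean per trial and channel
    std = data.std(axis=-1, keepdims=True)    # std per trial and channel
    return (data - mean) / (std + 1e-8)       # small epsilon avoids division by zero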
Example #2
def main():
    signal_data, signal_labels = readFile(backward.filepath)
    signal_re, labels_re = data_reshape_test(signal_data, signal_labels)
    print("signal_re.shape:")
    print(signal_re.shape)
    print("labels_re.shape:")
    print(labels_re.shape)
    re_data = pre_data_reshape(signal_re)
    print("re_data.shape:")
    print(re_data.shape)
    z_score_data = more_norm_dataset(re_data)
    data_1Dto2D = more_dataset_1Dto2D(z_score_data)
    print("data_1Dto2D.shape:")
    print(data_1Dto2D.shape)
    test(data_1Dto2D, labels_re)
Example #3
def main():
    print("该程序为预处理程序")
    filepath1 = 'F:/情感计算/数据集/DEAP/'
    signal_data, signal_labels = readFile(filepath1)
    print(signal_data.shape)
    # (1280, 40, 8064)
    # (1280, 4)
    pre_data = pre_baseline(signal_data)
    # re_data = pre_data_reshape(signal_data)
    print(pre_data.shape)
    # print(re_data.shape)
    z_score_data = more_norm_dataset(pre_data)
    print(z_score_data.shape)
    data_1Dto2D = more_dataset_1Dto2D(z_score_data)
    print(data_1Dto2D.shape)
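The 1D->2D step (more_dataset_1Dto2D) is likewise project-specific. In the DEAP/EEG literature each sample's 32 electrode values are often scattered onto a sparse 2D grid (commonly 9x9) according to their scalp positions. The sketch below shows only the general idea; the channel-to-cell mapping here is hypothetical and is not the mapping used by this code.

import numpy as np

# Hypothetical mapping: channel index -> (row, col) on a 9x9 grid.
# A real mapping needs one entry per electrode, taken from the montage.
CHANNEL_TO_GRID = {0: (0, 3), 1: (1, 3), 2: (2, 2)}

def sample_1d_to_2d(sample, grid_size=9):
    # Sketch only: sample is a 1D array of per-channel values for one time point.
    grid = np.zeros((grid_size, grid_size), dtype=np.float32)
    for ch, (r, c) in CHANNEL_TO_GRID.items():
        grid[r, c] = sample[ch]
    return grid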
Example #4
def main():
    signal_data, signal_labels = readFile(backward.filepath)
    signal_re, labels_re = data_reshape_test(signal_data, signal_labels)
    print("signal_re.shape:")
    print(signal_re.shape)
    print("labels_re.shape:")
    print(labels_re.shape)
    re_data = pre_data_reshape(signal_re)
    print("re_data.shape:")
    print(re_data.shape)
    z_score_data = more_norm_dataset(re_data)
    data_1Dto2D = more_dataset_1Dto2D(z_score_data)
    print("data_1Dto2D.shape:")
    print(data_1Dto2D.shape)
    dict_data = {"data": data_1Dto2D, "labels": labels_re}
    with open('CNN_test.pkl', 'wb') as f:
        pickle.dump(dict_data, f, pickle.HIGHEST_PROTOCOL)
    print("okkkkkkkkkkkkk")
Example #5
def main():
    # Read the raw file data and labels (filepath is assumed to be defined elsewhere in the module)
    print("-------Reading raw data and labels--------")
    signal_data, signal_labels = readFile(filepath)
    # Reshape the data and labels
    print("-----Reshaping data and labels------")
    signal_re, labels_re = data_reshape(signal_data, signal_labels)
    re_data = pre_data_reshape(signal_re)
    # Apply Z-score normalization to the data
    print("----------Applying Z-score----------")
    z_score_data = more_norm_dataset(re_data)
    # Convert the data from 1D to 2D
    print("--------Converting 1D -> 2D--------")
    data_1Dto2D = more_dataset_1Dto2D(z_score_data)
    print(data_1Dto2D.shape)
    # Start CNN training
    print("-----------Starting CNN training-----------")
    backward(data_1Dto2D, labels_re)
Example #6
def main():
    signal_data, signal_labels = readFile('F:/情感计算/数据集/DEAP/s02.mat')
    signal_re, labels_re = data_reshape(signal_data, signal_labels)
    # print(labels_re)  # debug
    backward(signal_re, labels_re)
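readFile comes from input_data and is not shown. For a single preprocessed DEAP subject file such as s02.mat, a minimal loader might look like the sketch below, assuming the standard preprocessed DEAP layout of data (40 trials, 40 channels, 8064 samples) and labels (40 trials, 4 ratings); the function name is hypothetical.

import scipy.io as sio

def read_single_subject(mat_path):
    # Sketch only: load one preprocessed DEAP subject .mat file.
    mat = sio.loadmat(mat_path)
    data = mat['data']      # (40, 40, 8064)
    labels = mat['labels']  # (40, 4): valence, arousal, dominance, liking
    return data, labels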
Example #7
def main():
    signal_data, signal_labels = readFile('F:/情感计算/数据集/DEAP/s02.mat')
    signal_re, labels_re = data_reshape_test(signal_data, signal_labels)
    test(signal_re, labels_re)
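data_reshape and data_reshape_test also come from input_data. Example #8 below computes BATCH_SIZE_ALL = PEOPEL_NUM * 40 // 4 * 3, which suggests a 3:1 train/test split of the trials; a minimal sketch of such a split (the split logic is an assumption, not the module's actual code) is:

def split_train_test(data, labels, train_fraction=0.75):
    # Sketch only: split trials into train and test along the first axis.
    n_train = int(len(data) * train_fraction)
    return (data[:n_train], labels[:n_train]), (data[n_train:], labels[n_train:])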
Example #8
# Concrete LSTM implementation in TensorFlow
import tensorflow as tf
import numpy as np
from input_data import readFile, data_reshape, data_reshape_test, PEOPEL_NUM

n_steps = 40  # number of time steps (X's)
n_inputs = 8064  # number of values per X
n_neurons = 128  # number of RNN neurons
n_outputs = 2  # number of outputs
n_layers = 4  # number of stacked layers
BATCH_SIZE_ALL = PEOPEL_NUM * 40 // 4 * 3
BATCH_SIZE = 10
n_epochs = 1000
learning_rate_base = 0.001

signal_data, signal_labels = readFile('F:/情感计算/数据集/DEAP/')
signal_re, labels_re = data_reshape(signal_data, signal_labels)
signal_test_re, labels_test_re = data_reshape_test(signal_data, signal_labels)

X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])  # placeholder for x
y = tf.placeholder(tf.int32, [None])  # placeholder for y

lstm_cells = [
    tf.contrib.rnn.BasicLSTMCell(num_units=n_neurons)
    for layer in range(n_layers)
]  # build a list of n_layers layers, each containing n_neurons LSTM units
multi_cell = tf.contrib.rnn.MultiRNNCell(lstm_cells)  # build a multi-layer RNN from the cell list
outputs, states = tf.nn.dynamic_rnn(multi_cell, X, dtype=tf.float32)
# outputs (tensor): [batch_size, max_time, cell.output_size]
# states: a tensor holding the final state, i.e. the state output by the last cell in the sequence. Normally state has
# shape [batch_size, cell.output_size], but when the cell is a BasicLSTMCell it has shape [2, batch_size,
# cell.output_size], where the 2 corresponds to the cell state c and the hidden state h.
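The snippet ends before the loss and training ops are defined. A common way to finish such a graph in TF 1.x style (a sketch, not this project's code) is to put a dense layer on the last time step and train with softmax cross-entropy:

# Sketch only: a typical classification head for the graph above.
logits = tf.layers.dense(outputs[:, -1, :], n_outputs)  # use the last time step's output
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy)
training_op = tf.train.AdamOptimizer(learning_rate_base).minimize(loss)
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))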
Example #9
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras import optimizers
import numpy as np
from keras.layers.core import Lambda
from keras import backend as K
from keras.optimizers import SGD
from keras import regularizers
from keras.models import load_model
from input_data import readFile, train_data, train_labels, test_data, test_labels

# import data
signal_data, signal_labels = readFile('F:/情感计算/Results/PicCut_3.pkl')
x_train = train_data(signal_data)
y_train = train_labels(signal_labels)
x_test = test_data(signal_data)
y_test = test_labels(signal_labels)

# weight-decay rate used for regularization
weight_decay = 0.0005
nb_epoch = 20
batch_size = 32

# layer1 32*32*3
model = Sequential()
# The first convolutional layer has 32 kernels of size 3*3; the stride is not given, so it defaults to 1*1.
# With stride=1*1 and padding='same' the output shape equals the input shape, so after this layer it is still 32*32.
model.add(