def evaluate(num):
    """Evaluate the trained CNN-LSTM model on subject ``num``'s test data.

    Restores the exponential-moving-average shadow weights from the latest
    checkpoint in ``Model_Save_Path``, runs subject ``num``'s test batch
    through the network, prints the test accuracy, and returns the
    intermediate feature maps of every layer.

    Args:
        num: index of the subject whose test data is evaluated
            (also indexes ``bat`` for that subject's batch size).

    Returns:
        Tuple ``(conv1, pool1, conv2, pool2, conv3, pool3, conv4, pool4,
        lstm)`` of numpy arrays holding each layer's activations;
        ``pool4`` and ``lstm`` are reshaped to ``(-1, 313, 1, 128)``.

    Raises:
        FileNotFoundError: if no checkpoint exists in ``Model_Save_Path``.
    """
    with tf.name_scope("input"):
        # First dimension is the number of examples in this subject's batch.
        input_x = tf.placeholder(tf.float32, [bat[num], 5000, 9, 1],
                                 name='EEG-input')
        input_y = tf.placeholder(tf.float32, [None, 7], name='EEG-lable')
    is_training = tf.cast(False, tf.bool)
    # No regularizer at test time, so the third argument is None.
    # (The original built an unused l2_regularizer here; removed.)
    out = CNN_LSTM(input_x, is_training, None)
    y = out['logist']
    with tf.name_scope("test_acc"):
        correct_predection = tf.equal(tf.argmax(y, 1), tf.argmax(input_y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_predection, tf.float32))
        tf.summary.scalar('test_acc', accuracy)
    # Restore the moving-average (shadow) weights rather than the raw ones.
    variable_averages = tf.train.ExponentialMovingAverage(Moving_Average_Decay)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(Model_Save_Path)
        if not (ckpt and ckpt.model_checkpoint_path):
            # BUG FIX: the original printed a message and then fell through
            # to a `return` of names that were never bound, raising
            # UnboundLocalError. Fail loudly and clearly instead.
            raise FileNotFoundError(
                "No checkpoint file found in %s" % Model_Save_Path)
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Checkpoint paths end in "...-<global_step>".
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        x, y = raw_test_batch10(num)  # test data for subject `num`
        reshape_xs = np.reshape(x, (-1, 5000, 9, 1))
        ys = one_hot(y)
        (conv1, pool1, conv2, pool2, conv3, pool3, conv4, pool4, lstm,
         acc_score) = sess.run(
            [out['conv1'], out['pool1'], out['conv2'], out['pool2'],
             out['conv3'], out['pool3'], out['conv4'], out['pool4'],
             out['rnn'], accuracy],
            feed_dict={input_x: reshape_xs, input_y: ys})
        pool4 = np.reshape(pool4, (-1, 313, 1, 128))
        lstm = np.reshape(lstm, (-1, 313, 1, 128))
        print("After %s training steps, test accuracy = %g"
              % (global_step, acc_score))
        return conv1, pool1, conv2, pool2, conv3, pool3, conv4, pool4, lstm
def evaluate(num):
    """Evaluate the trained BaseCNN model on subject ``num``'s test data.

    NOTE(review): this redefines ``evaluate`` from the CNN_LSTM variant
    earlier in the file; only the last definition is callable at runtime —
    consider renaming one of them.

    Restores the exponential-moving-average shadow weights from the latest
    checkpoint in ``Model_Save_Path``, runs subject ``num``'s test batch
    through the network, prints the test accuracy, and returns the
    intermediate CNN feature maps.

    Args:
        num: index of the subject whose test data is evaluated.

    Returns:
        Tuple ``(conv1, pool1, conv2, pool2, conv3, pool3, conv4, pool4)``
        of numpy arrays holding each layer's activations.

    Raises:
        FileNotFoundError: if no checkpoint exists in ``Model_Save_Path``.
    """
    with tf.name_scope("input"):
        input_x = tf.placeholder(tf.float32, [bat[num], 5000, 9, 1],
                                 name='EEG-input')
        input_y = tf.placeholder(tf.float32, [None, 7], name='EEG-lable')
        regularlizer = tf.contrib.layers.l2_regularizer(Regularazition_Rate)
    is_training = tf.cast(False, tf.bool)
    out = BaseCNN(input_x, is_training, regularlizer)
    y = out['logit']
    with tf.name_scope("test_acc"):
        correct_predection = tf.equal(tf.argmax(y, 1), tf.argmax(input_y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_predection, tf.float32))
        tf.summary.scalar('test_acc', accuracy)
    variable_averages = tf.train.ExponentialMovingAverage(Moving_Average_Decay)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
        # BUG FIX: the original ran tf.initialize_all_variables() and never
        # restored a checkpoint, so it evaluated a randomly initialized
        # network (variables_to_restore was computed but unused). Restore
        # the trained moving-average weights instead, as the CNN_LSTM
        # variant does.
        ckpt = tf.train.get_checkpoint_state(Model_Save_Path)
        if not (ckpt and ckpt.model_checkpoint_path):
            raise FileNotFoundError(
                "No checkpoint file found in %s" % Model_Save_Path)
        saver.restore(sess, ckpt.model_checkpoint_path)
        x, y = raw_test_batch10(num)  # test data for subject `num`
        reshape_xs = np.reshape(x, (-1, 5000, 9, 1))
        ys = one_hot(y)
        (conv1, pool1, conv2, pool2, conv3, pool3, conv4, pool4,
         acc_score) = sess.run(
            [out['conv1'], out['pool1'], out['conv2'], out['pool2'],
             out['conv3'], out['pool3'], out['conv4'], out['pool4'],
             accuracy],
            feed_dict={input_x: reshape_xs, input_y: ys})
        print("After training, test accuracy = %g" % acc_score)
        return conv1, pool1, conv2, pool2, conv3, pool3, conv4, pool4
def elva(data):
    """Evaluate the saved Keras CNN-LSTM model on subject ``data``'s test
    set and collect every intermediate layer's activations.

    Loads ``CNN_LSTM_model_v23.h5``, scores it on the padded test inputs,
    and builds one probe model per named layer to extract its output.

    Args:
        data: index of the subject whose test data is evaluated.

    Returns:
        dict mapping layer name (``'conv1'``, ``'pool1'``, ..., ``'lstm'``)
        to the numpy activations of that layer for the test inputs.
    """
    x_test, y_test = raw_test_batch10(data)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen, dtype='float32')
    # BUG FIX: the original assembled and compiled a fresh Sequential model
    # layer by layer and then immediately discarded it with load_model();
    # that entire rebuild was dead code and has been removed.
    model = load_model('CNN_LSTM_model_v23.h5')
    score, acc = model.evaluate(x_test, y_test)
    print("test accuracy = %g" % acc)
    out = {}
    # One probe model per layer of interest, in the original layer order.
    for layer_name in ('conv1', 'pool1', 'conv2', 'pool2',
                       'conv3', 'pool3', 'conv4', 'pool4', 'lstm'):
        probe = Model(inputs=model.inputs,
                      outputs=model.get_layer(layer_name).output)
        activations = probe.predict(x_test)
        print(activations.shape)
        out[layer_name] = activations
    return out