# Bias variables, one per layer: shapes match the corresponding conv
# filter counts, plus the 2-unit output layer.
_bias_shapes = {
    'conv1': [128],
    'conv2': [256],
    'conv3': [128],
    'out_b': [2],
}
biases = {name: tf.Variable(init(shape)) for name, shape in _bias_shapes.items()}
# Define placeholders for the input X and one-hot labels Y.
# X: (batch, 1, num_input) float sequences; Y: (batch, num_classes) one-hot.
X = tf.placeholder(tf.float32,[None,1,num_input],name='X')
Y = tf.placeholder(tf.float32,[None,num_classes],name='Y')
# batch_size = tf.Variable(128,dtype=tf.float32)
# Learning rate kept as a Variable so it can be reassigned during training
# (e.g. for decay) — TODO confirm a decay schedule exists downstream.
lr = tf.Variable(0.001,dtype=tf.float32)
# NOTE(review): this rebinds the name LSTM_FCN from the class to an
# instance, shadowing the class for the rest of the module — consider
# renaming the instance (e.g. model = LSTM_FCN(...)).
LSTM_FCN = LSTM_FCN(X,weights,biases,num_hidden)
logits = LSTM_FCN.connect_FCN_LSTM()
# Softmax probabilities are used only for the accuracy metric below;
# the loss takes the raw logits.
prediction = tf.nn.softmax(logits)
# Mean softmax cross-entropy loss.
# NOTE(review): softmax_cross_entropy_with_logits is deprecated in TF 1.x
# in favor of softmax_cross_entropy_with_logits_v2 (which backprops into
# labels unless tf.stop_gradient is applied) — confirm TF version before
# migrating.
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=Y))

# loss_op = tf.reduce_mean(tf.square(Y-logits))/2
optimizer = tf.train.AdamOptimizer(learning_rate=lr)

train_steps = optimizer.minimize(loss_op)

# Accuracy: fraction of samples whose argmax prediction matches the
# argmax of the one-hot label.
acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(prediction,1),tf.argmax(Y,1)),tf.float32))
# NOTE(review): "meraged" is a typo for "merged"; left unchanged because
# later (unseen) code may reference this name.
meraged = tf.summary.merge_all()

# Expose the loss op under the 'loss' collection for later retrieval
# (e.g. after restoring the graph from a checkpoint).
tf.add_to_collection('loss',loss_op)