# Graph input placeholders (TF1.x).
# batch_ph: flattened filterbank features — 40 coefficients per frame times the
# context window (leftFrames + current + rightFrames); batch dimension is dynamic.
# NOTE(review): L2 is indented 8 spaces while L1 is not — this looks like a
# broken paste/extraction of an enclosing scope (e.g. a `with` block); confirm
# the original indentation against the full file.
batch_ph = tf.placeholder(tf.float32, shape=(None, 40 * (Config.leftFrames + Config.rightFrames + 1)),name='batch_input')
        # label_ph: one-hot (or 3-way soft) target per example — 3 output classes.
        label_ph = tf.placeholder(tf.float32, shape=(None, 3), name="label_input")

print("Construct dataLoader...")
dataloader = dataLoader()

# Announce before constructing, consistent with the dataLoader section above.
print("Construct model...")
model = Model()

# Build the loss (and network output) for the configured model type.
if Config.modelName == "DNN_6_512" or Config.modelName == "DNN_3_128":
    # Bind the second return value instead of discarding it: `output` is added
    # to the "Pred_network" collection below, and `loss, _ = ...` left it
    # undefined (NameError) for the DNN models.
    # NOTE(review): assumes lossFunc_CrossEntropy returns (loss, output) —
    # confirm against the Model implementation.
    loss, output = model.lossFunc_CrossEntropy(batch_ph, label_ph)
elif Config.modelName == "DSCNN":
    output = model.create_ds_cnn_model(batch_ph, Config.model_settings,
                                       Config.model_size_info, Config.is_training)
    loss = model.lossFunc_dscnn(output, label_ph)
elif Config.modelName == "GRU":
    # Mask out padded timesteps so they do not contribute to the sequence loss.
    mask = tf.cast(tf.sequence_mask(lengths=length_ph, maxlen=maxLength_ph[0]),
                   dtype=tf.float32)
    output = model.GRU(batch_ph, length_ph)
    # (The duplicate add_to_collection for `output` that lived in this branch
    # was removed — it is added once, unconditionally, below.)
    loss = sequence_loss(output, label_ph, mask)

# NOTE(review): dividing by length_ph assumes it exists for ALL model types,
# not just GRU — confirm it is defined above for DNN/DSCNN runs.
tf.summary.scalar("Loss", loss/length_ph)
# Expose the prediction tensor and loss for graph restoration at inference time.
tf.add_to_collection("Pred_network", output)
tf.add_to_collection("Pred_network", loss)

print("Model Ready!")
print("Construct optimizer...")
with tf.name_scope("modelOptimizer"):
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(Config.learningRate,
                                               global_step=global_step,
                                               decay_steps=int(10000/Config.trainBatchSize),
                                               decay_rate=Config.decay_rate,