Esempio n. 1 — TensorFlow 1.x training setup (fragment)
    'conv1': tf.Variable(tf.random_normal([7, 1, 128])),
    'conv1_out': tf.Variable(tf.random_normal([128, 1])),
    'out': tf.Variable(tf.random_normal([2 * num_hidden, num_classes]))
}
# Bias variables, one per layer, each initialized from a normal distribution.
_bias_shapes = [('conv1', [128]), ('conv1_out', [1]), ('out', [num_classes])]
biases = {
    layer: tf.Variable(tf.random_normal(shape))
    for layer, shape in _bias_shapes
}

# Input placeholders for the graph (TF1 style).
# X: float32 sequences shaped [1, None, num_input] — batch dim fixed at 1,
#    variable sequence length. NOTE(review): confirm the feed really sends
#    one sequence at a time.
# Y: float32 one-hot labels shaped [None, num_classes].
X = tf.placeholder(tf.float32, [1, None, num_input], name='X')
Y = tf.placeholder(tf.float32, [None, num_classes], name='Y')

# Build the model and obtain raw class scores.
# NOTE(review): this rebinds the name `model_` from the class to its
# instance, shadowing the class for the rest of the module.
model_ = model_(X, num_hidden, weights, biases)
logits = model_.modeling()
prediction = tf.nn.softmax(logits)  # class probabilities (inference only)

# Mean cross-entropy loss; the *_v2 op applies softmax internally, so it
# consumes the raw `logits`, not `prediction`.
loss_op = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
# Optimizer: Adam with the module-level `learning_rate`.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# Alternatives tried during development (kept for reference):
# optimizer = tf.keras.optimizers.SGD()
# optimizer = tf.train.GradientDescentOptimizer()
# optimizer = tf.train.MomentumOptimizer(learning_rate,0.9,use_nesterov=True)
train_process = optimizer.minimize(loss_op)

# Accuracy metric
acc = tf.reduce_mean(
Esempio n. 2 — TensorFlow 1.x training setup (fragment)
    'conv1': tf.Variable(tf.random_normal([7, 1, 128])),
    'conv1_out': tf.Variable(tf.random_normal([128, 1])),
    'out': tf.Variable(init([2 * num_hidden, num_classes]))
}
# Per-layer bias variables. The conv biases are drawn from a normal
# distribution; the output bias uses the externally defined `init` initializer.
biases = dict(
    conv1=tf.Variable(tf.random_normal([128])),
    conv1_out=tf.Variable(tf.random_normal([1])),
    out=tf.Variable(init([num_classes])),
)

# Input placeholders for the graph (TF1 style).
# X: float32 batches shaped [None, time_steps, num_input].
# Y: float32 one-hot labels shaped [None, num_classes].
X = tf.placeholder(tf.float32, [None, time_steps, num_input], name='X')
Y = tf.placeholder(tf.float32, [None, num_classes], name='Y')

# Build the model and obtain raw class scores.
# NOTE(review): this rebinds the name `model_` from the class to its
# instance, shadowing the class for the rest of the module.
model_ = model_(X, num_hidden, weights, biases, batch_size)
logits = model_.modeling()
prediction = tf.nn.softmax(logits)  # class probabilities (inference only)

# Learning rate held in a Variable — presumably so it can be reassigned
# (e.g. decayed) during training; confirm against the session code.
lr = tf.Variable(0.001, dtype=tf.float32)
# Mean cross-entropy loss; the *_v2 op applies softmax internally, so it
# consumes the raw `logits`, not `prediction`.
loss_op = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y))
# Optimizer: Adam driven by the `lr` Variable above.
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
# Alternatives tried during development (kept for reference):
# optimizer = tf.keras.optimizers.SGD()
# optimizer = tf.train.GradientDescentOptimizer()
# optimizer = tf.train.MomentumOptimizer(learning_rate,0.9,use_nesterov=True)
train_process = optimizer.minimize(loss_op)

# Accuracy metric
# 定义准确率