# Example 1
def model_fn(features, labels, mode):
    """Estimator model_fn for the fully-connected network.

    Args:
        features: batch of input features supplied by the input_fn.
        labels: batch of integer class labels (cast to int32 for the loss).
        mode: a tf.estimator.ModeKeys value (TRAIN / EVAL / PREDICT).

    Returns:
        tf.estimator.EstimatorSpec configured for the requested mode.
    """
    # Build the network first. NOTE(review): `neural_network` and
    # `learning_rate` are defined elsewhere in this file.
    logits = neural_network(features)

    # Predicted classes (argmax over the logits). The original also built
    # an unused softmax tensor; it has been removed. Note: "pred_calsses"
    # typo fixed to "pred_classes".
    pred_classes = tf.argmax(logits, axis=1)

    # In prediction mode, return early with only the predictions.
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)

    # Define the loss function and the optimizer.
    loss_op = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits,
            labels=tf.cast(labels, dtype=tf.int32)))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss_op,
                                  global_step=tf.train.get_global_step())

    # Evaluation metric: accuracy of the predicted classes.
    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)

    # TF Estimators require an EstimatorSpec telling the framework which
    # ops to use for training, evaluation and prediction.
    estim_specs = tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=pred_classes,
        loss=loss_op,
        train_op=train_op,
        eval_metric_ops={'accuracy': acc_op})
    return estim_specs
def model_fn(features, labels, mode):
    """Estimator model_fn for the convolutional network.

    Builds two graphs over shared variables: a training graph with dropout
    enabled and an inference graph (reuse=True) with dropout disabled.

    Args:
        features: batch of input features supplied by the input_fn.
        labels: batch of integer class labels (cast to int32 for the loss).
        mode: a tf.estimator.ModeKeys value (TRAIN / EVAL / PREDICT).

    Returns:
        tf.estimator.EstimatorSpec configured for the requested mode.
    """
    # Training graph: dropout active.
    logits_train = conv_network(features,
                                num_classes,
                                dropout,
                                reuse=False,
                                is_training=True)
    # Inference graph: same variables (reuse=True), dropout disabled.
    logits_test = conv_network(features,
                               num_classes,
                               dropout,
                               reuse=True,
                               is_training=False)

    # Predictions always come from the dropout-free graph. The original
    # also built an unused softmax tensor (`pred_probs`); removed.
    pred_classes = tf.argmax(logits_test, axis=1)

    # In prediction mode, return early with only the predictions.
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)

    # Loss is computed on logits_train so that dropout takes effect.
    loss_op = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits_train,
            labels=tf.cast(labels, dtype=tf.int32)))
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train_op = optimizer.minimize(loss_op,
                                  global_step=tf.train.get_global_step())

    # Evaluation metric: accuracy of the predicted classes.
    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)

    estim_spec = tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=pred_classes,
        loss=loss_op,
        train_op=train_op,
        eval_metric_ops={'accuracy': acc_op})

    return estim_spec
def accuracy_fn(inference_fn, inputs, labels):
    """Return the mean accuracy of `inference_fn` on `inputs` vs `labels`.

    NOTE(review): assumes `labels` has the same dtype as tf.argmax output
    (int64) — confirm against callers.
    """
    probs = tf.nn.softmax(inference_fn(inputs))
    predicted = tf.argmax(probs, 1)
    hits = tf.cast(tf.equal(predicted, labels), tf.float32)
    return tf.reduce_mean(hits)
# Example 4
# Training: each step runs one k-means update and fetches the average
# distance plus the per-sample cluster assignments.
for i in range(1, num_steps + 1):
    _, d, idx = sess.run([train_op, avg_distance, cluster_idx],
                         feed_dict={X: full_data_x})
    if i % 10 == 0 or i == 1:
        print("Step %i, Avg Distance: %f" % (i, d))

# Assign a label to each centroid: accumulate, per centroid, the one-hot
# label vectors of every training sample assigned to it.
# (Idiom fix: enumerate instead of range(len(idx)).)
counts = np.zeros(shape=(k, num_classes))
for i, cluster in enumerate(idx):
    counts[cluster] += mnist.train.labels[i]

# The most frequent label becomes the centroid's label.
labels_map = [np.argmax(c) for c in counts]
labels_map = tf.convert_to_tensor(labels_map)

# Lookup: map each sample's centroid id to that centroid's label.
cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx)
# Accuracy: predicted centroid label vs. the one-hot ground truth.
correct_prediction = tf.equal(cluster_label, tf.cast(tf.argmax(Y, 1),
                                                     tf.int32))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Evaluate the model on the test set.
test_x, test_y = mnist.test.images, mnist.test.labels
print("Test Accuracy:", sess.run(accuracy_op, feed_dict={
    X: test_x,
    Y: test_y
}))
# Example 5
            # Inference graph for evaluation: reuses the tower's training
            # variables with dropout disabled (is_training=False).
            logits_test = conv_net(_x,
                                   num_classes,
                                   dropout,
                                   reuse=True,
                                   is_training=False)

            # Define loss and optimizer on logits_train so that dropout
            # takes effect during training.
            loss_op = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(logits=logits_train,
                                                        labels=_y))
            optimizer = tf.train.AdamOptimizer(learning_rate)
            grads = optimizer.compute_gradients(loss_op)
            # Compute accuracy on the first GPU tower only.
            if i == 0:
                # Evaluate model (with test logits, for dropout to be disabled)
                correct_pred = tf.equal(tf.argmax(logits_test, 1),
                                        tf.argmax(_y, 1))
                accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
            reuse_vars = True
            tower_grads.append(grads)

    # Average the per-tower gradients and apply them in a single update.
    tower_grads = average_gradients(tower_grads)
    train_op = optimizer.apply_gradients(tower_grads)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)

        for step in range(1, num_steps + 1):
            # One batch per GPU per step.
            batch_x, batch_y = mnist.train.next_batch(batch_size * num_gpus)
            ts = time.time()
# Example 6
    sess.run(init)

    # One pass over the whole training set per epoch.
    for epoch in range(train_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches.
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run one optimization step and fetch the batch cost.
            _, c = sess.run([optimizer, cost],
                            feed_dict={
                                x: batch_xs,
                                y: batch_ys
                            })
            # Accumulate the average loss over the epoch.
            avg_cost += c / total_batch
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=",
                  "{:.9f}".format(avg_cost))
    # Typo fixed: "Ooptimizer" -> "Optimizer".
    print("Optimizer Finished!")

    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))

    # Mean accuracy over the test set.
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:",
          accuracy.eval({
              x: mnist.test.images,
              y: mnist.test.labels
          }))
def accuarcy_fn(interface_fn, inputs, labels):
    """Mean accuracy of `interface_fn` predictions on `inputs` vs `labels`.

    (Function name keeps the original "accuarcy" spelling: it is the public
    name and callers depend on it.)
    """
    probs = tf.nn.softmax(interface_fn(inputs))
    # Compare the argmax class index against the labels.
    matches = tf.equal(tf.argmax(probs, 1), labels)
    return tf.reduce_mean(tf.cast(matches, tf.float32))
# Example 8
                                              dtype=tf.float32)

    return tf.matmul(outputs[-1], weights['out']) + biases['out']


# Build the RNN and turn the logits into class probabilities.
logits = RNN(X, weights, biases)
prediction = tf.nn.softmax(logits)

# Define loss and optimizer.
loss_op = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluate the model: predicted class vs. one-hot ground truth.
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, dtype=tf.float32))

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)

    for step in range(1, training_step + 1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape flat pixels to (batch, timesteps, features) for the RNN.
        batch_x = batch_x.reshape((batch_size, timesteps, num_input))

        # Run the optimizer first.
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            loss, acc = sess.run([loss_op, accuracy],
# Example 9
# Placeholders for flattened input features and integer class labels.
X = tf.placeholder(tf.float32, shape=[None, num_features])
Y = tf.placeholder(tf.float32, shape=[None])

hparams = tensor_forest.ForestHParams(num_classes=num_classes,
                                      num_features=num_features,
                                      num_trees=num_trees,
                                      max_nodes=max_nodes).fill()
# Build the random forest. NOTE(review): "forgest" is a typo but the name
# is kept in case later (unseen) code references it.
forgest_graph = tensor_forest.RandomForestGraphs(hparams)
# Get the training graph and the training loss.
train_op = forgest_graph.training_graph(X, Y)
loss_op = forgest_graph.training_loss(X, Y)

# Measure accuracy: predicted class vs. labels cast to int64.
infer_op, _, _ = forgest_graph.inference_graph(X)
correct_prediction = tf.equal(tf.argmax(infer_op, 1), tf.cast(Y, tf.int64))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Forest resources need initializing alongside the regular variables.
init_vars = tf.group(
    tf.global_variables_initializer(),
    resources.initialize_resources(resources.shared_resources()))

sess = tf.Session()

sess.run(init_vars)

for i in range(1, num_steps + 1):
    batch_x, batch_y = mnist.train.next_batch(batch_size)
    _, l = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
    if i % 50 == 0 or i == 1:
        # Accuracy is reported on the current training batch only.
        acc = sess.run(accuracy_op, feed_dict={X: batch_x, Y: batch_y})
# Cross-entropy loss. NOTE(review): -sum(y_ * log(y)) is numerically
# unstable when y contains zeros; tf's *_cross_entropy_with_logits ops
# are the safer alternative.
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))  # 交叉熵

# Train with gradient descent (lr=0.01), minimizing the cross-entropy.
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(
    cross_entropy)  # 训练使用最小梯度下降,且最小化交叉熵loss
init = tf.global_variables_initializer()
for i in range(1000):
    batch = mnist.train.next_batch(50)  # load  mini-batchsize dataset
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
print("训练结束..")
"""
这段表达特别好:tf.argmax 是一个非常有用的函数,它能给出某个tensor对象在某一维上的其数据最大值所在的索引值。
由于标签向量是由0,1组成,因此最大值1所在的索引位置就是类别标签,比如tf.argmax(y,1)返回的是模型对于任一输入x预测到的标签值,
而 tf.argmax(y_,1) 代表正确的标签,我们可以用 tf.equal 来检测我们的预测是否真实标签匹配(索引位置一样表示匹配)。
"""
# (Translation of the note above) tf.argmax returns the index of the
# maximum along an axis; with one-hot labels, argmax(y, 1) is the
# predicted class and argmax(y_, 1) the true class, so tf.equal tests
# whether the prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuarcy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

# Plain softmax regression reaches only limited accuracy on MNIST.
print(accuarcy.eval(feed_dict={
    x: mnist.test.images,
    y_: mnist.test.labels
}))  # 使用softmax取得效果有限
"""
开始使用CNN进行训练识别
"""


# We first need to create many W and b variables; since we use ReLU neurons, it is good practice to initialize the bias terms with a small positive number.
def weight_variable(shape):
    """Create a weight Variable drawn from a truncated normal (stddev 0.1)."""
    init_vals = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init_vals)
# Bias for the 10 output classes, initialized to zeros.
b = tf.Variable(tf.zeros([10]), name='Bias')

# Build the model, wrapping each op group in a name_scope so the graph is
# easy to visualize in TensorBoard.

with tf.name_scope('Model'):
    pred = tf.nn.softmax(tf.matmul(x, W) + b)

with tf.name_scope('Loss'):
    # Cross-entropy between one-hot labels y and predictions pred.
    cost = tf.reduce_mean(-tf.reduce_sum(y *
                                         tf.log(pred), reduction_indices=1))

with tf.name_scope('SGD'):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

with tf.name_scope('Accuracy'):
    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    acc = tf.reduce_mean(tf.cast(acc, tf.float32))

init = tf.global_variables_initializer()

# Scalar summaries so loss/accuracy curves show up in TensorBoard.
tf.summary.scalar("loss", cost)

tf.summary.scalar("accuracy", acc)

merged_summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(init)
    # Write the graph to logs_path for TensorBoard inspection.
    summary_writer = tf.summary.FileWriter(logs_path,
                                           graph=tf.get_default_graph())