Code Example #1
import tensorflow as tf

# Helpers from the tutorial-style iris_data module (see the sketch after this example)
from iris_data import load_data, csv_input_fn, eval_input_fn, CSV_COLUMN_NAMES


def my_model():
    train_path = "data/iris_training.csv"
    test_path = "data/iris_test.csv"
    batch_size = 100
    train_steps = 1000

    _, (test_x, test_y) = load_data()

    # Define the feature columns (all inputs are numeric)
    feature_columns = [
        tf.feature_column.numeric_column(key) for key in CSV_COLUMN_NAMES[:-1]
    ]

    # Build the estimator
    classifier = tf.estimator.LinearClassifier(feature_columns=feature_columns,
                                               n_classes=3,
                                               model_dir="models/iris_Linear")

    # Train
    classifier.train(input_fn=lambda: csv_input_fn(train_path, batch_size),
                     steps=train_steps)

    # Evaluate; eval_result is a dict with four keys: accuracy, average_loss, global_step, loss
    eval_result = classifier.evaluate(input_fn=lambda: eval_input_fn(
        features=test_x, labels=test_y, batch_size=batch_size))

    print('Test set accuracy: {:0.3f}'.format(eval_result["accuracy"]))
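
All of these examples lean on the iris_data helper module (load_data, csv_input_fn, eval_input_fn, CSV_COLUMN_NAMES). For reference, here is a minimal sketch of csv_input_fn modeled on TensorFlow's iris get-started tutorial; the column names and per-column CSV defaults below are assumptions taken from that tutorial, not from the projects themselves:

import tensorflow as tf

# Assumed layout of the iris CSVs: four numeric features, then the label
CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species']
CSV_TYPES = [[0.0], [0.0], [0.0], [0.0], [0]]  # record defaults double as dtypes

def _parse_line(line):
    # Decode one CSV line into a (features dict, label) pair
    fields = tf.decode_csv(line, record_defaults=CSV_TYPES)
    features = dict(zip(CSV_COLUMN_NAMES, fields))
    label = features.pop('Species')
    return features, label

def csv_input_fn(csv_path, batch_size):
    # Skip the header row, parse each line, then shuffle/repeat/batch
    dataset = tf.data.TextLineDataset(csv_path).skip(1)
    dataset = dataset.map(_parse_line)
    return dataset.shuffle(1000).repeat().batch(batch_size)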
Code Example #2
import argparse

import tensorflow as tf

import iris_data

parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=1000, type=int,
                    help='number of training steps')


def main(argv):
    args = parser.parse_args(argv[1:])
    # Load the data as pandas DataFrames
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()
    # Feature columns describe how to use the input data
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))
    # Build the model and configure how checkpoints are saved
    my_checkpoint_config = tf.estimator.RunConfig(
        save_checkpoints_secs=20 * 60,  # save a checkpoint every 20 minutes
        keep_checkpoint_max=10)  # keep the 10 most recent checkpoints
    cls = tf.estimator.DNNClassifier(hidden_units=[10, 10],
                                     feature_columns=my_feature_columns,
                                     n_classes=3,
                                     model_dir='model/',
                                     config=my_checkpoint_config)
    # Train the model
    # cls.train(input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
    #           steps=args.train_steps)
    cls.train(input_fn=lambda: iris_data.csv_input_fn('iris_training.csv',
                                                      args.batch_size),
              steps=args.train_steps)
    # Evaluate the model
    eval_res = cls.evaluate(input_fn=lambda: iris_data.eval_input_fn(
        test_x, test_y, args.batch_size))
    print("\n Test Set accuracy: {:0.3f}\n".format(eval_res['accuracy']))

    # Predict on three new samples
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }
    predictions = cls.predict(input_fn=lambda: iris_data.eval_input_fn(
        predict_x, labels=None, batch_size=args.batch_size))
    template = ('\n Prediction is "{}" ({:.1f}%), expected "{}"')
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        prob = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * prob, expec))
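
Example #2 reuses iris_data.eval_input_fn for both evaluation and prediction (passing labels=None in the predict call). A sketch of that helper, again assuming the TensorFlow iris tutorial's version; note that unlike csv_input_fn it neither shuffles nor repeats, so each example is seen exactly once:

import tensorflow as tf

def eval_input_fn(features, labels, batch_size):
    # Accept a pandas DataFrame or a plain dict of feature arrays
    features = dict(features)
    # labels=None signals predict mode: yield features only
    inputs = features if labels is None else (features, labels)
    dataset = tf.data.Dataset.from_tensor_slices(inputs)
    return dataset.batch(batch_size)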
Code Example #3
File: Train.py Project: ITBAIO2/CTR
import iris_data
import pandas as pd
import tensorflow as tf

train_path, test_path = iris_data.maybe_download()
print(train_path)

# All the inputs are numeric; the last CSV column ('Species') is the label,
# so it is excluded from the feature columns
feature_columns = [
    tf.feature_column.numeric_column(name)
    for name in iris_data.CSV_COLUMN_NAMES[:-1]
]

# Build the estimator
est = tf.estimator.LinearClassifier(feature_columns, n_classes=3)  # iris has 3 classes

# Train the estimator
batch_size = 100
est.train(steps=1000,
          input_fn=lambda: iris_data.csv_input_fn(train_path, batch_size))
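
iris_data.maybe_download() used above fetches and caches the two CSV files. A sketch of the tutorial's implementation via tf.keras.utils.get_file, which stores downloads under ~/.keras/datasets; the URLs are the tutorial's download locations:

import tensorflow as tf

TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"

def maybe_download():
    # get_file returns the local cache path, downloading only if missing
    train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)
    test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)
    return train_path, test_path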
Code Example #4
File: test_estimator.py Project: niuyi/AITest
import iris_data


def parse_csv2():
    # Smoke test: download the iris CSVs and build the training dataset
    train_path, test_path = iris_data.maybe_download()
    iris_data.csv_input_fn(train_path, 1000)
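
The test above only checks that the input function builds a pipeline without raising. To actually pull a batch under TF 1.x graph execution, one could extend it roughly as follows (inspect_one_batch is a hypothetical helper, and it assumes csv_input_fn returns a tf.data.Dataset as sketched under Code Example #1):

import tensorflow as tf

def inspect_one_batch():
    train_path, _ = iris_data.maybe_download()
    dataset = iris_data.csv_input_fn(train_path, batch_size=32)
    # A one-shot iterator suffices here because csv_input_fn repeats forever
    features, labels = dataset.make_one_shot_iterator().get_next()
    with tf.Session() as sess:
        feats, labs = sess.run([features, labels])
        print({k: v[:3] for k, v in feats.items()}, labs[:3])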