def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.Estimator(
        model_fn=my_model,
        params={
            'feature_columns': my_feature_columns,
            'hidden_units': [10, 10],
            'n_classes': 3,
        })

    # Train the Model.
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=args.batch_size))

    for pred_dict, expec in zip(predictions, expected):
        template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
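# NOTE: several snippets in this collection (including the one above) build
# tf.estimator.Estimator with model_fn=my_model, but my_model itself is not
# included. The following is a minimal sketch of such a model_fn, assuming the
# params keys used above ('feature_columns', 'hidden_units', 'n_classes') and
# following the standard custom-Estimator pattern; it is not any particular
# author's implementation.
def my_model(features, labels, mode, params):
    """DNN with the hidden layers and output size described by `params`."""
    # Input layer from the feature columns, then the hidden layers.
    net = tf.feature_column.input_layer(features, params['feature_columns'])
    for units in params['hidden_units']:
        net = tf.layers.dense(net, units=units, activation=tf.nn.relu)

    # One logit per class.
    logits = tf.layers.dense(net, params['n_classes'], activation=None)
    predicted_classes = tf.argmax(logits, 1)

    # PREDICT mode: return class ids, probabilities, and raw logits.
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predicted_classes[:, tf.newaxis],
            'probabilities': tf.nn.softmax(logits),
            'logits': logits,
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # Loss and accuracy metric shared by TRAIN and EVAL.
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    accuracy = tf.metrics.accuracy(labels=labels,
                                   predictions=predicted_classes,
                                   name='acc_op')
    metrics = {'accuracy': accuracy}

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)

    # TRAIN mode: minimize the loss.
    assert mode == tf.estimator.ModeKeys.TRAIN
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)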
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data.
    # train_x.keys() are the CSV_COLUMN_NAMES defined in iris_data.py.
    # train_x holds the petal/sepal measurements used for training, without the
    # last column (the column that indicates the type of the iris).
    # train_y is that last column, i.e. the iris type.
    # test_x.keys(), test_x and test_y mirror train_x and train_y.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    # TensorFlow must be told which columns are features; each gets a name and a
    # type, e.g.:
    #   _NumericColumn(key='SepalLength', shape=(1,), default_value=None, dtype=tf.float32)
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        # The model must choose between 3 classes.
        n_classes=3)

    # Train the Model.
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=args.batch_size))

    for pred_dict, expec in zip(predictions, expected):
        template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Create feature columns for all features.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(
            tf.feature_column.numeric_column(key=key, dtype=tf.float32))
    # Alternative:
    # feature_columns = [tf.feature_column.numeric_column(name)
    #                    for name in iris_data.CSV_COLUMN_NAMES[:-1]]

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        hidden_units=[10, 10],
        n_classes=3)

    # Train the model.
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
    # eval_result is a dict, unpacked above as keyword arguments (**kwargs):
    # for key in eval_result:
    #     print("key: {}, value: {}".format(key, eval_result[key]))
    # eval_result = {accuracy: val, average_loss: val, loss: val, global_step: val}

    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=args.batch_size))

    template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        print("class_id {}".format(class_id))
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
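# NOTE: every snippet in this collection delegates input handling to
# iris_data.train_input_fn / iris_data.eval_input_fn, which are not reproduced
# here. The following is a sketch of what those functions conventionally look
# like in the TensorFlow iris example; treat it as an assumption about this
# particular iris_data module rather than its actual source.
def train_input_fn(features, labels, batch_size):
    """An input function for training: shuffle, repeat, and batch the examples."""
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return dataset.shuffle(1000).repeat().batch(batch_size)


def eval_input_fn(features, labels, batch_size):
    """An input function for evaluation or prediction (labels may be None)."""
    features = dict(features)
    inputs = features if labels is None else (features, labels)
    dataset = tf.data.Dataset.from_tensor_slices(inputs)
    assert batch_size is not None, "batch_size must not be None"
    return dataset.batch(batch_size)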
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))
    # print(train_x['SepalLength'])

    # The Estimator builds the complete neural network.
    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each (2 hidden layers, 10 neurons per layer).
        hidden_units=[10, 10],
        # The model must choose between 3 classes (the output layer).
        n_classes=3)

    # Train the Model.
    classifier.train(
        # train_input_fn(features, labels, batch_size)
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        # Default value of args.train_steps is 1000; `steps` tells train() to stop
        # after the specified number of iterations.
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=args.batch_size))

    template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    my_checkpointing_config = tf.estimator.RunConfig(
        save_checkpoints_secs=20 * 60,  # Save checkpoints every 20 minutes.
        # save_checkpoints_steps=200,   # Or: save checkpoints every 200 steps.
        keep_checkpoint_max=10,         # Retain the 10 most recent checkpoints.
    )

    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        hidden_units=[10, 10],  # A checkpoint cannot be restored if the model changes.
        n_classes=3,
        model_dir='models/iris',
        config=my_checkpointing_config,
    )

    # Train the Model.
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=args.batch_size))

    template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        # The model must choose between 3 classes.
        n_classes=3)

    # Train the Model.
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 5.9],
        'SepalWidth': [3.3, 3.0, 3.0],
        'PetalLength': [1.7, 4.2, 4.2],
        'PetalWidth': [0.5, 1.5, 1.5],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=args.batch_size))

    template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))

    # TensorFlow provides several APIs for writing log files.
    sess = tf.Session()
    writer = tf.summary.FileWriter('./log', sess.graph)
    writer.close()
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        # Model dir.
        model_dir='./logdir',
        # The model must choose between 3 classes.
        n_classes=3)

    # Train the Model.
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=args.batch_size))

    template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Describe the input features.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build a DNN with 2 hidden layers, 10 neurons in each.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two layers, 10 neurons each.
        hidden_units=[10, 10],
        # There are 3 label values, so the model predicts 3 classes.
        n_classes=3,
        model_dir="model")

    # Train the model.
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=args.batch_size))

    template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def main(conf):
    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    if "n_iterations" in conf:
        train_steps = 100 * conf["n_iterations"]
    else:
        train_steps = 1000
    # Assumed default; the original snippet relies on a batch_size defined elsewhere.
    batch_size = conf.get("batch_size", 100)

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build a 2 hidden layer DNN with the configured layer sizes.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers whose widths come from the config.
        hidden_units=[conf["layer1"], conf["layer2"]],
        # The model must choose between 3 classes.
        n_classes=3)

    # Train the Model.
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, batch_size),
        steps=train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=batch_size))

    template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))

    return eval_result["accuracy"]
def main(argv):
    args = parser.parse_args()

    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        # The model must choose between 3 classes.
        n_classes=3)

    # Train the Model.
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=args.batch_size))

    template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
    for pred_dict, expec in zip(predictions, expected):
        '''
        print(pred_dict) result:
        {
            'logits': array([-16.18431473, 5.61845016, 13.43036652], dtype=float32),
            'probabilities': array([1.37509145e-13, 4.04717575e-04, 9.99595344e-01], dtype=float32),
            'class_ids': array([2]),
            'classes': array([b'2'], dtype=object)
        }
        '''
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        model_dir='res1',
        # The model must choose between 4 classes.
        n_classes=4)

    # Train the Model.
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model (this variant uses a modified iris_data
    # module with Q1..Q5 features and 4 classes).
    expected = ['MSC', 'MBA', 'MCA', 'JOB']
    predict_x = {
        'Q1': [1.0, 0.0, 0.0, 0.0],
        'Q2': [1.0, 0.0, 1.0, 1.0],
        'Q3': [1.0, 1.0, 0.0, 0.0],
        'Q4': [1.0, 0.0, 0.0, 0.0],
        'Q5': [1.0, 0.0, 1.0, 0.0],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=args.batch_size))

    template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    # DNNClassifier is a deep neural network classifier; the estimator's methods
    # are called with an appropriate input function as the source of data.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers, 10 nodes each.
        hidden_units=[10, 10],
        # The result has 3 classes (see the official docs).
        n_classes=3)

    # Train the Model.
    classifier.train(
        # Input function supplying features, labels, and batch size.
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    # Pass the features of three irises and get the corresponding class for each.
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=args.batch_size))

    template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def app1():
    import iris_data

    batch_size = 100
    train_steps = 1000

    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    my_feature_columns = [
        tf.feature_column.numeric_column(key=key) for key in train_x.keys()
    ]

    classifier = tf.estimator.Estimator(
        model_fn=my_model,
        params={
            'feature_columns': my_feature_columns,
            'hidden_units': [10, 10],
            'n_classes': 3
        },
        model_dir='./output/d1')

    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, batch_size),
        steps=train_steps)

    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=batch_size))

    for pred_dict, expec in zip(predictions, expected):
        template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def main(_):
    # Build the dataset.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns (describe how to use the input).
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Estimator.
    classifier = tf.estimator.DNNClassifier(
        hidden_units=[10, 10],
        feature_columns=my_feature_columns,
        n_classes=3,
    )

    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, FLAGS.batch_size),
        steps=FLAGS.train_steps)

    evaluate_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, FLAGS.batch_size),
        steps=FLAGS.train_steps)
    # {'accuracy': 0.96666664, 'average_loss': 0.058699723, 'loss': 1.7609917, 'global_step': 1000}
    print('-' * 40)
    print('accuracy :{accuracy:0.3f}'.format(**evaluate_result))

    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }
    print('-' * 40)

    predict_result = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(
            predict_x, labels=None, batch_size=FLAGS.batch_size))

    template = 'Prediction: {}  probability: {:0.2f}%.  Expected: {}'
    for predict_dict, expec in zip(predict_result, expected):
        # predict_dict:
        # {'logits': array([ 13.478667, 7.7477455, -21.122244], dtype=float32),
        #  'probabilities': array([9.9676645e-01, 3.2335958e-03, 9.3671849e-16], dtype=float32),
        #  'class_ids': array([0]), 'classes': array([b'0'], dtype=object)}
        class_ids = predict_dict['class_ids'][0]
        _predict = iris_data.SPECIES[class_ids]
        probability = predict_dict['probabilities'][class_ids]
        print(template.format(_predict, probability * 100, expec))
def main(argv):
    args = parser.parse_args(argv[1:])
    batch_size = args.batch_size
    train_steps = args.train_steps

    (train_x, train_y), (test_x, test_y) = iris_data.load_data()
    # print(train_y)

    # Configure feature columns describing how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))
    # print(my_feature_columns)

    # Build the estimator: 2 hidden layer DNN, 10 units each.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        hidden_units=[10, 10],
        n_classes=3
    )

    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, batch_size),
        steps=train_steps
    )

    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, batch_size),
    )
    print('\nTest SET ACCURACY:{accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=batch_size)
    )

    for pred_dict, expec in zip(predictions, expected):
        template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
        class_id = pred_dict['class_ids'][0]
        prob = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * prob, expec))
def main():
    # Load the data.
    (train_x, train_y), (test_x, test_y) = load_data()

    # Define the feature columns.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Choose the model.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        hidden_units=[10, 10],    # Units per hidden layer (2 hidden layers, 10 units each).
        n_classes=3,              # Number of classes.
        model_dir="models/iris")  # Directory where the model is saved.

    # Train: train_x provides the features, train_y the labels.
    classifier.train(
        input_fn=lambda: train_input_fn(features=train_x, labels=train_y,
                                        batch_size=batch_size),
        steps=train_steps)

    # Evaluate: eval_result is a dict with 4 keys: accuracy, average_loss, global_step, loss.
    eval_result = classifier.evaluate(
        input_fn=lambda: eval_input_fn(features=test_x, labels=test_y,
                                       batch_size=batch_size))
    print('Test set accuracy: {:0.3f}'.format(eval_result["accuracy"]))

    # Predict 3 examples.
    expected = ['Setosa', 'Versicolor', 'Virginica']  # Expected classes for the 3 examples.
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    # predictions contains all of the predictions.
    predictions = classifier.predict(
        input_fn=lambda: eval_input_fn(features=predict_x, labels=None,
                                       batch_size=batch_size))

    template = 'Prediction is "{}" ({:.1f}%), expected "{}"'  # Class, probability, expected class.

    # Print the prediction results.
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]                # Predicted label id.
        probability = pred_dict['probabilities'][class_id]  # Probability of that class.
        print(template.format(SPECIES[class_id], 100 * probability, expec))
def main():
    # Load the data.
    (train_x, train_y), (test_x, test_y) = load_data()

    # Define the feature columns.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Custom model: an Estimator built from a user-supplied model_fn.
    classifier = tf.estimator.Estimator(
        model_fn=my_model,
        params={
            'feature_columns': my_feature_columns,
            'hidden_units': [10, 10],
            'n_classes': 3,
        })

    # Train the model. When train() is called, the Estimator framework invokes
    # the model function with mode set to ModeKeys.TRAIN.
    classifier.train(
        input_fn=lambda: train_input_fn(train_x, train_y, batch_size),  # Input function.
        steps=train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: eval_input_fn(test_x, test_y, batch_size))
    print('Test set accuracy: {:0.3f}'.format(eval_result["accuracy"]))

    # Predict.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: eval_input_fn(predict_x, labels=None, batch_size=batch_size))

    for pred_dict, expec in zip(predictions, expected):
        template = 'Prediction is "{}" ({:.1f}%), expected "{}"'
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(SPECIES[class_id], 100 * probability, expec))
def my_model():
    train_path = "data/iris_training.csv"
    test_path = "data/iris_test.csv"
    batch_size = 100
    train_steps = 1000

    _, (test_x, test_y) = load_data()

    # All the inputs are numeric.
    # Define the feature columns.
    feature_columns = [
        tf.feature_column.numeric_column(key) for key in CSV_COLUMN_NAMES[:-1]
    ]

    # Build the estimator.
    classifier = tf.estimator.LinearClassifier(
        feature_columns=feature_columns,
        n_classes=3,
        model_dir="models/iris_Linear")

    # Train.
    classifier.train(
        input_fn=lambda: csv_input_fn(train_path, batch_size),
        steps=train_steps)

    # Evaluate: eval_result is a dict with 4 keys: accuracy, average_loss, global_step, loss.
    eval_result = classifier.evaluate(
        input_fn=lambda: eval_input_fn(features=test_x, labels=test_y,
                                       batch_size=batch_size))
    print('Test set accuracy: {:0.3f}'.format(eval_result["accuracy"]))
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build a DNN with 3 hidden layers of 25 units each.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Three hidden layers of 25 nodes each.
        hidden_units=[25, 25, 25],
        # The model must choose between 3 classes.
        n_classes=3)

    # Train the Model.
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model (the prediction code is not included in
    # this excerpt).
def predict(estimator):
    '''
    Build dummy test data and run prediction with the model.
    :param estimator:
    :return:
    '''
    # Generate predictions from the model.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = estimator.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=len(predict_x)))

    template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict["probabilities"][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def model_api():
    # Generate predictions from the model.
    expected = ['PG', 'DropOut', 'Job']
    predict_x = {
        'M1': [91.0, 13.0, 40.0],
        'M2': [86.0, 10.0, 38.0],
        'M3': [91.0, 11.0, 60.0],
        'M4': [81.0, 10.0, 65.0],
        'sQ1': [1.0, 0.0, 1.0],
        'sQ2': [1.0, 1.0, 0.0],
        'sQ3': [1.0, 0.0, 1.0],
        'sQ4': [1.0, 0.0, 1.0],
        'sQ5': [1.0, 1.0, 0.0],
        'TQ1': [1.0, 1.0, 1.0],
        'TQ2': [1.0, 0.0, 0.0],
        'TQ3': [1.0, 1.0, 0.0],
        'TQ4': [1.0, 0.0, 1.0],
        'TQ5': [1.0, 0.0, 0.0],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=1000))

    for pred_dict, expec in zip(predictions, expected):
        template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))

    output_data = {"prediction": iris_data.SPECIES[class_id],
                   "value": round(probability, 4)}
    # output_data = iris_data.SPECIES[class_id]
    return output_data
def model_api():
    # Generate predictions from the model.
    expected = ['AI', 'Animation', 'SW', 'Cyber']
    predict_x = {
        'Q1': [1.0, 0.0, 0.0, 0.0],
        'Q2': [1.0, 0.0, 0.0, 0.0],
        'Q3': [0.0, 1.0, 0.0, 0.0],
        'Q4': [0.0, 1.0, 1.0, 0.0],
        'Q5': [0.0, 0.0, 0.0, 1.0],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=1000))

    for pred_dict, expec in zip(predictions, expected):
        template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))

    output_data = {
        "prediction": iris_data.SPECIES[class_id],
        "value": round(probability, 4)
    }
    # output_data = iris_data.SPECIES[class_id]
    return output_data
def evaluate(model, data, args):
    """Evaluate the trained model."""
    _, (test_x, test_y) = data
    input_fn = lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size)
    eval_result = model.evaluate(input_fn=input_fn)
    print('Test set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
def main(argv):
    args = parser.parse_args(argv[1:])

    # Load the data (pandas DataFrames).
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input data.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build the model and configure checkpointing.
    my_checkpoint_config = tf.estimator.RunConfig(
        save_checkpoints_secs=20 * 60,  # Save a checkpoint every 20 minutes.
        keep_checkpoint_max=10)         # Keep the 10 most recent checkpoints.
    cls = tf.estimator.DNNClassifier(
        hidden_units=[10, 10],
        feature_columns=my_feature_columns,
        n_classes=3,
        model_dir='model/',
        config=my_checkpoint_config)

    # Train the model.
    cls.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_res = cls.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print("\n Test Set accuracy: {:0.3f}\n".format(eval_res['accuracy']))

    # Predict.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    # Signature: predict(input_fn, predict_keys=None, hooks=None,
    #                    checkpoint_path=None, yield_single_examples=True)
    predictions = cls.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=args.batch_size))

    template = '\n Prediction is "{}" ({:.1f}%), expected "{}"'
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        prob = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * prob, expec))
def main(_):
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    my_feature_columns = []
    # train_x.keys() gives the column names.
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))
    print(my_feature_columns)

    # Create a neural network with two hidden layers of 10 nodes each.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        hidden_units=[10, 10],
        n_classes=3
    )

    # Train.
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, FLAGS.batch_size),
        steps=FLAGS.train_step
    )

    # Evaluate.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, FLAGS.batch_size)
    )
    # print('===========', eval_result)
    # {'accuracy': 0.96666664, 'average_loss': 0.065968044, 'loss': 1.9790413, 'global_step': 1000}
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Predict.
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=FLAGS.batch_size)
    )

    for pred_dict, expec in zip(predictions, expected):
        template = '\nPrediction is "{}"({:.1f}%), expected "{}"'
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def main(argv):
    args = parser.parse_args(argv[1:])

    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    my_feature_columns = []
    for key in train_x:
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    classifier = tf.estimator.Estimator(
        model_fn=my_model,
        params={
            'feature_columns': my_feature_columns,
            'hidden_units': [20, 20, 20],
            'n_classes': 3,
        })

    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                 batch_size=args.batch_size))

    for pred_dict, expec in zip(predictions, expected):
        template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
        class_id = pred_dict['class_ids'][0]
        # The predictions dict key is 'probabilities'.
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def _setup(self, config):
    """
    Set up your tensorflow model.
    :param config:
    :return:
    """
    # Hyperparameters for this trial can be accessed in the dictionary self.config.
    self.config = config

    # Save all checkpoints independently of the ray.tune checkpoint_path to avoid
    # their deletion by tune.trainable.restore_from_object; the model dir is based
    # on a timestamp.
    if self.config['exp'] in self.logdir:
        self.model_dir = self.logdir.split(self.config['exp'])[0] + self.config['exp']
    else:
        raise IndexError(self.logdir + ' does not contain splitter ' +
                         self.config['exp'] +
                         '; check the configured logdir path and exp field')
    self.model_dir_full = (self.model_dir + '/' +
                           datetime.datetime.now().strftime("%d_%b_%Y_%I_%M_%S_%f%p"))

    # Configuration.
    self.training_steps = 250
    self.run_config = tf.estimator.RunConfig(
        save_summary_steps=100,
        # Save a checkpoint only before and after self.estimator.train().
        save_checkpoints_secs=None,
        # Save both the iterator checkpoint and the model checkpoints after the
        # same number of steps.
        save_checkpoints_steps=self.training_steps,
        keep_checkpoint_max=None,  # Avoid removing checkpoints.
        keep_checkpoint_every_n_hours=None)

    # Load the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    self.my_feature_columns = []
    for key in train_x.keys():
        self.my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Estimator: build a 2 hidden layer DNN with 10, 10 units respectively.
    self.estimator = tf.estimator.DNNClassifier(
        feature_columns=self.my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        # The model must choose between 3 classes.
        n_classes=3,
        optimizer=tf.train.ProximalAdagradOptimizer(
            learning_rate=self.config['lr'],
            l1_regularization_strength=self.config['l1'],
        ))

    # Data for evaluation.
    self.input_fn_eval = lambda: iris_data.eval_input_fn(test_x, test_y, batch_size=20)
    # Data for training.
    self.input_fn_train = lambda: iris_data.train_input_fn(train_x, train_y, batch_size=20)

    self.steps = 0
def eval(estimator):
    '''
    Evaluate the model after training.
    :param estimator:
    :return:
    '''
    eval_result = estimator.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, BATCH_SIZE))
    print("\nTest set accuracy: {accuracy:0.3f}\n".format(**eval_result))
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data.
    (train_X, train_Y), (test_X, test_Y) = iris_data.load_data()

    # Describe how to use the input.
    my_feature_column = []
    for key in train_X.keys():
        my_feature_column.append(tf.feature_column.numeric_column(key=key))

    # Build a 2 hidden layer DNN with 10, 10 units.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_column,
        hidden_units=[10, 10],
        n_classes=3)

    # Train the model.
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_X, train_Y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_X, test_Y, args.batch_size))
    print("Test accuracy: {accuracy:0.3f}\n".format(**eval_result))

    expected = ['Setosa', 'Versicolor', 'Virginica']
    features = {
        "SepalLength": [6.4, 5.0],
        "SepalWidth": [2.8, 2.3],
        "PetalLength": [5.6, 3.3],
        "PetalWidth": [2.2, 1.0]
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(features, labels=None,
                                                 batch_size=args.batch_size))

    template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def main(argv):
    # Fetch the data.
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Based on:
    # https://medium.com/tensorflow/multi-gpu-training-with-estimators-tf-keras-and-tf-data-ba584c3134db
    NUM_GPUS = 2
    strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=NUM_GPUS)
    # config = tf.estimator.RunConfig(train_distribute=strategy)
    # estimator = tf.keras.estimator.model_to_estimator(model, config=config)
    config = tf.estimator.RunConfig(
        model_dir="/tmp/tf_estimator_iris_model",
        save_summary_steps=1,
        train_distribute=strategy,
        save_checkpoints_steps=100,
        keep_checkpoint_max=3,
        log_step_count_steps=10)

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    estimator = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        model_dir="/tmp/tf_estimator_iris_model",
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        config=config,
        # The model must choose between 3 classes.
        n_classes=3)

    train_input_fn = lambda: iris_data.train_input_fn(train_x, train_y, opts.batch_size)
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=1000)

    # Evaluate the model.
    eval_input_fn = lambda: iris_data.eval_input_fn(test_x, test_y, opts.batch_size)
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, steps=None,
                                      start_delay_secs=0, throttle_secs=60)

    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
def predict(classifier, batch_size, emotions):
    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(emotions, labels=None,
                                                 batch_size=batch_size))

    # Return the class and probability of the first prediction only.
    for pred_dict in predictions:
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        return iris_data.SCORES[class_id], probability
# Tail of a custom model_fn plus the driver code; the earlier part of the model_fn
# (input layer, hidden layers, logits, the PREDICT branch, loss and metrics) is not
# included in this excerpt.
        return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)

    # Create training op.
    assert mode == tf.estimator.ModeKeys.TRAIN
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)


classifier = tf.estimator.Estimator(
    model_fn=my_model,
    params={
        'feature_columns': my_feature_columns,
        # Two hidden layers of 10 nodes each.
        'hidden_units': [10, 10],
        # The model must choose between 3 classes.
        'n_classes': 3,
    })

# Train the Model.
classifier.train(
    input_fn=lambda: iris_data.train_input_fn(train_x, train_y, 128),
    steps=100)

eval_result = classifier.evaluate(
    input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, 10000))
print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
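# NOTE: most of the main(argv) variants above reference a module-level `parser`
# and are launched through tf.app.run, but that boilerplate does not appear in
# this collection. The following is a minimal sketch of it, following the layout
# of the standard premade_estimator.py example; the flag defaults (100 and 1000)
# are assumptions consistent with the snippets above.
import argparse
import tensorflow as tf

parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=1000, type=int,
                    help='number of training steps')

if __name__ == '__main__':
    # Route INFO logs (loss, global step) to stderr and run main(argv).
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(main)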