Example #1
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()
    print('train_x', type(train_x))
    print('train_y', type(train_y))
    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        # The model must choose between 3 classes.
        n_classes=3)

    # Train the Model.
    classifier.train(input_fn=lambda: iris_data.train_input_fn(
        train_x, train_y, args.batch_size),
                     steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(input_fn=lambda: iris_data.eval_input_fn(
        test_x, test_y, args.batch_size))

    #print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(input_fn=lambda: iris_data.eval_input_fn(
        predict_x, labels=None, batch_size=args.batch_size))

    template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')

    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]

        print(template.format(iris_data.SPECIES[class_id],
                              100 * probability, expec))
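
All of these examples assume an iris_data helper module in the style of the canonical TensorFlow iris tutorial. For reference, a minimal sketch of the input functions they call (signatures inferred from the call sites in this collection, not taken from any single snippet):

import tensorflow as tf

def train_input_fn(features, labels, batch_size):
    # Shuffle, repeat, and batch the examples for training.
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return dataset.shuffle(1000).repeat().batch(batch_size)

def eval_input_fn(features, labels=None, batch_size=None):
    # Batch the examples; no shuffling or repeating for evaluation/prediction.
    features = dict(features)
    inputs = features if labels is None else (features, labels)
    assert batch_size is not None, 'batch_size must not be None'
    return tf.data.Dataset.from_tensor_slices(inputs).batch(batch_size)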
Example #2
def main(argv):
    args = parser.parse_args(argv[1:])
    
    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        model_dir='models/iris',
        # The model must choose between 3 classes.
        n_classes=3)

    # Train the Model.
    #print (train_x)
    classifier.train(
        input_fn=lambda:iris_data.train_input_fn(train_x, train_y,
                                                 args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    # eval_result = classifier.evaluate(
    #     input_fn=lambda:iris_data.eval_input_fn(test_x, test_y,
    #                                             args.batch_size))

    # print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
    predictApp(classifier, args)

def getPrediction():
    print(request.json)
    '''request_data = {

        'Q1': [1.0, 0.0, 0.0, 0.0],
        'Q2': [1.0, 0.0, 0.0 ,0.0],
        'Q3': [0.0, 1.0, 0.0, 0.0],
        'Q4': [0.0, 1.0, 1.0, 0.0],
        'Q5': [0.0, 0.0, 0.0, 1.0],
        
    }'''
    request_data = request.json

    args = ''
    if globals.train_x is None:
        (globals.train_x,
         globals.train_y), (globals.test_x,
                            globals.test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in globals.train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        model_dir='models/iris',
        # The model must choose between 4 classes.
        n_classes=4)
    output = jsonify(predictApp(classifier, request_data))
    return output
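
getPrediction reads request.json and returns jsonify(...), so it is evidently meant to run inside a Flask request context. A hypothetical wiring (the app object and route path are assumptions, not taken from the snippet):

from flask import Flask, request, jsonify

app = Flask(__name__)
# Expose the handler as a POST endpoint; the path name is illustrative only.
app.add_url_rule('/predict', 'getPrediction', getPrediction, methods=['POST'])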
Example #4
def main(unused_argv):
    # Load training and test data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()
    #print(train_x)
    #print(train_y)
    #print(test_x)
    #print(test_y)
    # Create estimator
    cnnEstimator = tf.estimator.Estimator(model_fn=cnn,
                                          model_dir="/tmp/convnet_model")

    tensorsLog = {"probabilities": "softmax_tensor"}
    loggingHook = tf.train.LoggingTensorHook(tensors=tensorsLog,
                                             every_n_iter=50)

    # Train the model
    train_input = tf.estimator.inputs.numpy_input_fn(x={"x": train_x},
                                                     y=train_y,
                                                     batch_size=100,
                                                     num_epochs=None,
                                                     shuffle=True)

    cnnEstimator.train(input_fn=train_input, steps=100, hooks=[loggingHook])

    # Evaluate the model and print the result
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_x}, y=test_y, num_epochs=1, shuffle=True)  # was False before

    eval_results = cnnEstimator.evaluate(input_fn=eval_input_fn)
    print(eval_results)
Example #5
def trainSystem():
    # Fetch the data
    if globals.train_x is None:
        (globals.train_x, globals.train_y), (globals.test_x, globals.test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in globals.train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        model_dir='models/iris',
        # The model must choose between 3 classes.
        n_classes=3)

    # Train the Model.
    classifier.train(
        input_fn=lambda:iris_data.train_input_fn(globals.train_x, globals.train_y,
                                                 globals.batch_size),
        steps=globals.train_steps)
    return 'Training done'
Example #6
def my_model():
    train_path = "data/iris_training.csv"
    test_path = "data/iris_test.csv"
    batch_size = 100
    train_steps = 1000

    _, (test_x, test_y) = load_data()

    # All the inputs are numeric
    # Define the feature columns
    feature_columns = [
        tf.feature_column.numeric_column(key) for key in CSV_COLUMN_NAMES[:-1]
    ]

    # Build the estimator
    classifier = tf.estimator.LinearClassifier(feature_columns=feature_columns,
                                               n_classes=3,
                                               model_dir="models/iris_Linear")

    # Train
    classifier.train(input_fn=lambda: csv_input_fn(train_path, batch_size),
                     steps=train_steps)

    # Evaluate; the returned eval_result is a dict with 4 keys: accuracy, average_loss, global_step, loss
    eval_result = classifier.evaluate(input_fn=lambda: eval_input_fn(
        features=test_x, labels=test_y, batch_size=batch_size))

    print('Test set accuracy: {:0.3f}'.format(eval_result["accuracy"]))
def test():
    # Build Model
    x = tf.placeholder(tf.float32, [None, 4])
    y_label = tf.placeholder(tf.float32, [None, 3])
    w = tf.Variable(tf.zeros([4, 3]))
    b = tf.Variable(tf.zeros([3]))
    y = tf.nn.softmax(tf.matmul(x, w) + b)

    # Loss
    cross_entropy = tf.reduce_mean(
        -tf.reduce_sum(y_label * tf.log(y), reduction_indices=[1]))
    train = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

    # Prediction
    correct_prediction = tf.equal(tf.argmax(y, axis=1),
                                  tf.argmax(y_label, axis=1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    (train_x, train_y), (test_x, test_y) = iris_data.load_data()
    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # TensorFlow provides several APIs for writing log files
        writer = tf.summary.FileWriter('./log', tf.get_default_graph())
        writer.close()
        for step in range(1001):
            batch_x = train_x
            batch_y = train_y
            sess.run(train, feed_dict={x: batch_x, y_label: batch_y})
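
Note that y_label is a [None, 3] one-hot placeholder, while load_data() in the surrounding examples returns integer class ids. If that holds here too, the labels need a conversion along these lines before being fed (a sketch, assuming train_y is a pandas Series of ids 0-2):

import numpy as np

# One-hot encode integer class ids to match the [None, 3] placeholder.
batch_y = np.eye(3)[train_y.values]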
    def _setup(self):
        self.steps = 0
        self.session = tf.Session()
        (train_x, train_y), (test_x, test_y) = iris_data.load_data()
        self.train_x = train_x
        self.train_y = train_y

        self.test_x = test_x
        self.test_y = test_y

        # Feature columns describe how to use the input.
        my_feature_columns = []
        for key in train_x.keys():
            my_feature_columns.append(
                tf.feature_column.numeric_column(key=key))

        layer_size = int(self.config['layer_size'])

        # Build a 2 hidden layer DNN with layer_size units in each layer.
        self.classifier = tf.estimator.Estimator(
            model_fn=my_model,
            params={
                'feature_columns': my_feature_columns,
                # Two hidden layers of layer_size nodes each.
                'hidden_units': [layer_size, layer_size],
                # The model must choose between 3 classes.
                'n_classes': 3,
            })

        self.saver = None
        self.global_step_tensor = training_util._get_or_create_global_step_read()  # pylint: disable=protected-access
Example #9
def main(_):
    connection = dbapi.connect(address=args.hxehost, port=args.hxeport, user=args.hxeusr, password=args.hxepwd)
    cursor = connection.cursor()

    # Create a TF_DATA schema
    try:
        cursor.execute('CREATE SCHEMA TF_DATA;')
    except (RuntimeError, TypeError, NameError, dbapi.Error):
        pass

    # Set the current schema to TF_DATA schema
    cursor.execute('SET SCHEMA TF_DATA;')

    # Drop the table before creating it
    try:
        cursor.execute('DROP TABLE TF_DATA.IRIS_DATA;')
    except (RuntimeError, TypeError, NameError, dbapi.Error):
        pass

    # Create the tables
    cursor.execute('CREATE TABLE TF_DATA.IRIS_DATA  (ID INTEGER, SEPALLENGTH FLOAT, SEPALWIDTH FLOAT, PETALLENGTH FLOAT, PETALWIDTH FLOAT, SPECIES INT);')

    query_iris  = 'INSERT INTO TF_DATA.IRIS_DATA  (ID, SEPALLENGTH, SEPALWIDTH, PETALLENGTH, PETALWIDTH, SPECIES) VALUES (?,?,?,?,?,?)'

    # Load the IRIS data in the table
    imported_row = 0
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()
    for index, row in test_x.iterrows():
        cursor.execute(query_iris, (index, row["SepalLength"], row["SepalWidth"], row["PetalLength"], row["PetalWidth"], test_y[index]))
        imported_row += 1
    print("Imported " + str(imported_row) + " row(s)")

    cursor.close()
    connection.close()
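
To sanity-check the load before the connection is closed, a count query could be placed just above cursor.close() (a sketch using the same dbapi cursor):

    cursor.execute('SELECT COUNT(*) FROM TF_DATA.IRIS_DATA;')
    print('Rows in TF_DATA.IRIS_DATA: ' + str(cursor.fetchone()[0]))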
Example #10
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data
    # train_x.keys() are the CSV_COLUMN_NAMES given in the iris_data.py file
    # train_x holds the training measurements, without the last
    #     column that indicates the type of the iris
    # train_y is the last column, which indicates the type of the iris
    # test_x.keys(), test_x, test_y: same as for train_x and train_y
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    # We need to tell TensorFlow which columns are features;
    # each column records the name, shape and dtype of a feature,
    # e.g.: _NumericColumn(key='SepalLength', shape=(1,), default_value=None, dtype=tf.float32)
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        # The model must choose between 3 classes.
        n_classes=3)

    # Train the Model.
    classifier.train(input_fn=lambda: iris_data.train_input_fn(
        train_x, train_y, args.batch_size),
                     steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(input_fn=lambda: iris_data.eval_input_fn(
        test_x, test_y, args.batch_size))

    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(input_fn=lambda: iris_data.eval_input_fn(
        predict_x, labels=None, batch_size=args.batch_size))

    for pred_dict, expec in zip(predictions, expected):
        template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')

        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]

        print(
            template.format(iris_data.SPECIES[class_id], 100 * probability,
                            expec))
Example #11
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Create feature columns for all features
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key, dtype=tf.float32))

    # Alternative: build the columns directly from the CSV header names
    feature_columns = [tf.feature_column.numeric_column(name) for name in iris_data.CSV_COLUMN_NAMES[:-1]]


    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        hidden_units=[10, 10],
        n_classes=3)

    # Train the model.
    classifier.train(
        input_fn = lambda:iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn = lambda:iris_data.eval_input_fn(test_x, test_y, args.batch_size))


    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
    # eval_result is unpacked via ** (keyworded variable-length arguments)

    # for key in eval_result:
    #     print("key: {}, value: {}".format(key, eval_result[key]))
    # eval_result = {accuracy: val, average_loss: val, loss: val, global_step: val}

    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn = lambda:iris_data.eval_input_fn(predict_x, labels=None, batch_size=args.batch_size))


    template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')

    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        print("class_id {}".format(class_id))
        probability = pred_dict['probabilities'][class_id]

        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
Example #12
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    feature_columns = []
    for key in train_x.keys():
        feature_columns.append(tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_hash_bucket(
                key=key, hash_bucket_size=100, dtype=tf.int32),
            dimension=8))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.Estimator(
        model_fn=my_model,
        params={
            'feature_columns': feature_columns,
            'input_dim': 4,
            'input_atoms': 8,
            'output_dim': 3,  # 3 classes
            'output_atoms': 3,
            'iter_routing': 2,
        })

    # Train the Model.
    classifier.train(
        input_fn=lambda:iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
            input_fn=lambda:iris_data.eval_input_fn(test_x, test_y, args.batch_size), steps=1)

    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda:iris_data.eval_input_fn(predict_x,
                                                labels=None,
                                                batch_size=args.batch_size))

    for pred_dict, expec in zip(predictions, expected):
        template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')

        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]

        print(template.format(iris_data.SPECIES[class_id],
                              100 * probability, expec))
Example #13
    def _setup(self, config):
        """
        Setup your tensorflow model
        :param config:
        :return:
        """

        # Hyperparameters for this trial can be accessed in dictionary self.config
        self.config = config

        # Save all checkpoints independently of the ray.tune checkpoint_path, so they are not
        # deleted by tune.trainable.restore_from_object; the model dir is based on a timestamp
        if self.config['exp'] in self.logdir:
            self.model_dir = self.logdir.split(self.config['exp'])[0] + self.config['exp']
        else:
        raise IndexError(self.logdir + ' does not contain splitter ' + self.config['exp'] +
                         '; check the configured logdir path and exp field')
        self.model_dir_full = self.model_dir + '/' + datetime.datetime.now().strftime("%d_%b_%Y_%I_%M_%S_%f%p")

        # configuration
        self.training_steps = 250
        self.run_config = tf.estimator.RunConfig(
            save_summary_steps=100,
            save_checkpoints_secs=None,  # save checkpoint only before and after self.estimator.train()
            save_checkpoints_steps=self.training_steps,  # save both iterator checkpoint and model checkpoints after
            # same number of steps
            keep_checkpoint_max=None,  # avoid removing checkpoints
            keep_checkpoint_every_n_hours=None)

        # load data
        (train_x, train_y), (test_x, test_y) = iris_data.load_data()
        # Feature columns describe how to use the input.
        self.my_feature_columns = []
        for key in train_x.keys():
            self.my_feature_columns.append(tf.feature_column.numeric_column(key=key))

        # estimator
        # Build 2 hidden layer DNN with 10, 10 units respectively.
        self.estimator = tf.estimator.DNNClassifier(
            feature_columns=self.my_feature_columns,
            # Two hidden layers of 10 nodes each.
            hidden_units=[10, 10],
            # The model must choose between 3 classes.
            n_classes=3,
            optimizer=tf.train.ProximalAdagradOptimizer(
                learning_rate=self.config['lr'],
                l1_regularization_strength=self.config['l1'],
            ))

        # data for evaluation
        self.input_fn_eval = lambda: iris_data.eval_input_fn(test_x, test_y,
                                                             batch_size=20)
        # data for train
        self.input_fn_train = lambda: iris_data.train_input_fn(train_x, train_y,
                                                               batch_size=20)
        self.steps = 0
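
This _setup pairs with the old-style ray.tune Trainable interface; a plausible companion _train step, sketched under that assumption (the method name and return-dict convention come from the pre-1.0 tune API, not from this snippet):

    def _train(self):
        # One tune iteration: train for a fixed budget, then report accuracy.
        self.estimator.train(input_fn=self.input_fn_train,
                             steps=self.training_steps)
        self.steps += self.training_steps
        eval_result = self.estimator.evaluate(input_fn=self.input_fn_eval)
        return {'mean_accuracy': eval_result['accuracy']}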
Example #14
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    #print (train_x['SepalLength'])

    # The Estimator will build the complete neural network.
    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each. (2 hidden layers and 10 neurons in each hidden layer)
        hidden_units=[10, 10],
        # The model must choose between 3 classes. (output layer)
        n_classes=3)

    # Train the Model.
    classifier.train(
        # train_input_fn(features, labels, batch_size)
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y,
                                                  args.batch_size),
        # default value of args.train_steps = 1000
        steps=args.train_steps
    )  # steps tells the Estimator to stop training after the specified number of iterations

    # Evaluate the model.
    eval_result = classifier.evaluate(input_fn=lambda: iris_data.eval_input_fn(
        test_x, test_y, args.batch_size))

    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(input_fn=lambda: iris_data.eval_input_fn(
        predict_x, labels=None, batch_size=args.batch_size))

    template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')

    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]

        print(
            template.format(iris_data.SPECIES[class_id], 100 * probability,
                            expec))
def getPredictionAlone():
    args = ''
    if globals.train_x is None:
        (globals.train_x,
         globals.train_y), (globals.test_x,
                            globals.test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in globals.train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))
    print(my_feature_columns)
    classifier = tf.estimator.DNNClassifier(feature_columns=my_feature_columns,
                                            hidden_units=[10, 10],
                                            n_classes=4,
                                            model_dir='models/iris')
    # feature_columns=my_feature_columns,
    # Two hidden layers of 10 nodes each.
    #   hidden_units=[10, 10],
    #   n_classes=3,

    #   model_dir='models/iris'
    # The model must choose between 3 classes.)
    # classifier = tf.estimator.Estimator(model_fn='DNNClassifier', model_dir='models/iris')
    print('this is called')

    def model_api():
        # Generate predictions from the model
        expected = ['AI', 'Animation', 'SW', 'Cyber']
        predict_x = {
            'Q1': [1.0, 0.0, 0.0, 0.0],
            'Q2': [1.0, 0.0, 0.0, 0.0],
            'Q3': [0.0, 1.0, 0.0, 0.0],
            'Q4': [0.0, 1.0, 1.0, 0.0],
            'Q5': [0.0, 0.0, 0.0, 1.0],
        }
        predictions = classifier.predict(
            input_fn=lambda: iris_data.eval_input_fn(
                predict_x, labels=None, batch_size=1000))
        for pred_dict, expec in zip(predictions, expected):
            template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')

            class_id = pred_dict['class_ids'][0]
            probability = pred_dict['probabilities'][class_id]

            print(
                template.format(iris_data.SPECIES[class_id], 100 * probability,
                                expec))
            output_data = {
                "prediction": iris_data.SPECIES[class_id],
                "value": round(probability, 4)
            }
            #output_data = iris_data.SPECIES[class_id]
            return output_data

# output = jsonify(predictApp(classifier, args))

    return model_api
Example #16
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    my_checkpointing_config = tf.estimator.RunConfig(
        save_checkpoints_secs=20 * 60,  # Save checkpoints every 20 minutes.
        #save_checkpoints_steps = 200,   # Save checkpoints every 200 steps.
        keep_checkpoint_max=10,  # Retain the 10 most recent checkpoints.
    )

    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        hidden_units=[10, 10],  # cannot restore a checkpoint if the model structure changes
        n_classes=3,
        model_dir='models/iris',
        config=my_checkpointing_config,
    )

    # Train the Model.
    classifier.train(input_fn=lambda: iris_data.train_input_fn(
        train_x, train_y, args.batch_size),
                     steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(input_fn=lambda: iris_data.eval_input_fn(
        test_x, test_y, args.batch_size))

    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(input_fn=lambda: iris_data.eval_input_fn(
        predict_x, labels=None, batch_size=args.batch_size))

    template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')

    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]

        print(
            template.format(iris_data.SPECIES[class_id], 100 * probability,
                            expec))
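
Since model_dir and a RunConfig are set here, checkpoint resumption can be seen directly by training again with the same classifier; the second call continues from the latest checkpoint (a sketch reusing the objects defined above):

    # A second train() call resumes from the newest checkpoint in models/iris;
    # the global step keeps increasing across calls.
    classifier.train(input_fn=lambda: iris_data.train_input_fn(
        train_x, train_y, args.batch_size),
                     steps=100)
    print(classifier.get_variable_value('global_step'))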
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build a 4 hidden layer DNN with 80 units each.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Four hidden layers of 80 nodes each.
        hidden_units=[80, 80, 80, 80],
        # The model must choose between 3 classes.
        n_classes=3,
        model_dir="models/premade_1_0_0",
        dropout=0.1)

    # Train the Model.
    classifier.train(
        input_fn=lambda:iris_data.train_input_fn(train_x, train_y,
                                                 args.batch_size),
        steps=args.train_steps)
    #
    # # Evaluate the model.
    # eval_result = classifier.evaluate(
    #     input_fn=lambda:iris_data.eval_input_fn(test_x, test_y,
    #                                             args.batch_size))

    # print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.9, 5.9, 6.9],
        'SepalWidth': [3.1, 3.0, 3.1],
        'PetalLength': [4.1, 4.2, 5.4],
        'PetalWidth': [1.4, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda:iris_data.eval_input_fn(predict_x,
                                                labels=None,
                                                batch_size=args.batch_size))

    for pred_dict, expec in zip(predictions, expected):
        template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')

        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]

        print(template.format(iris_data.SPECIES[class_id],
                              100 * probability, expec))
Example #18
def main(argv):
    opts = parser.parse_args(argv[1:])

    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Based on https://medium.com/tensorflow/multi-gpu-training-with-estimators-tf-keras-and-tf-data-ba584c3134db
    NUM_GPUS = 2
    strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=NUM_GPUS)
    # config = tf.estimator.RunConfig(train_distribute=strategy)
    # estimator = tf.keras.estimator.model_to_estimator(model, config=config)


    config = tf.estimator.RunConfig(
                model_dir="/tmp/tf_estimator_iris_model",
                save_summary_steps=1,
                train_distribute=strategy,
                save_checkpoints_steps=100,
                keep_checkpoint_max=3,
                log_step_count_steps=10)

    
    # Build 2 hidden layer DNN with 10, 10 units respectively.
    estimator = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        model_dir="/tmp/tf_estimator_iris_model",
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        config=config,
        # The model must choose between 3 classes.
        n_classes=3)

    train_input_fn = lambda: iris_data.train_input_fn(train_x, train_y,
                                                      opts.batch_size)

    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,
                                        max_steps=1000)

    # Evaluate the model.
    eval_input_fn = lambda: iris_data.eval_input_fn(test_x, test_y,
                                                    opts.batch_size)

    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,
                                      steps=None,
                                      start_delay_secs=0,
                                      throttle_secs=60)

    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
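
MirroredStrategy with a hard-coded NUM_GPUS fails on machines with fewer devices; one possible guard, sketched with the same tf.contrib-era API as above (device_lib is a TF-internal module, so treat this as illustrative):

    from tensorflow.python.client import device_lib

    # Count visible GPUs and fall back to the default strategy if there are none.
    gpus = [d for d in device_lib.list_local_devices() if d.device_type == 'GPU']
    strategy = (tf.contrib.distribute.MirroredStrategy(num_gpus=len(gpus))
                if gpus else None)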
Example #19
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe the input features
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build a DNN with 2 hidden layers, 10 neurons each
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # 2 hidden layers with 10 neurons each
        hidden_units=[10, 10],
        # There are 3 label classes, so n_classes is 3
        n_classes=3,
        model_dir="model")

    # Train the model
    classifier.train(
        input_fn=lambda:iris_data.train_input_fn(train_x, train_y,
                                                 args.batch_size),
        steps=args.train_steps)

    # Evaluate the model
    eval_result = classifier.evaluate(
        input_fn=lambda:iris_data.eval_input_fn(test_x, test_y,
                                                args.batch_size))

    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda:iris_data.eval_input_fn(predict_x,
                                                labels=None,
                                                batch_size=args.batch_size))

    template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')

    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]

        print(template.format(iris_data.SPECIES[class_id],
                              100 * probability, expec))
Example #20
def main(argv):
	args = parser.parse_args(argv[1:])

	# Fetch the data
	(train_x, train_y), (test_x, test_y) = iris_data.load_data()

	# Feature columns describe how to use the input.
	my_feature_columns = []
	for key in train_x.keys():
		my_feature_columns.append(tf.feature_column.numeric_column(key=key))

	# Build 2 hidden layer DNN with 10, 10 units respectively.
	classifier = tf.estimator.DNNClassifier(
		feature_columns=my_feature_columns,
		# Two hidden layers of 10 nodes each.
		hidden_units=[10, 10],
		# The model must choose between 3 classes.
		n_classes=3)

	# Train the Model.
	classifier.train(input_fn=lambda:iris_data.train_input_fn(train_x, train_y, args.batch_size), steps=args.train_steps)

	# Evaluate the model.
	eval_result = classifier.evaluate(input_fn=lambda:iris_data.eval_input_fn(test_x, test_y, args.batch_size))

	print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

	# Generate predictions from the model
	expected = ['Setosa', 'Versicolor', 'Virginica']
	predict_x = {
		'SepalLength': [5.1, 5.9, 6.9],
		'SepalWidth': [3.3, 3.0, 3.1],
		'PetalLength': [1.7, 4.2, 5.4],
		'PetalWidth': [0.5, 1.5, 2.1],
	}

	predictions = classifier.predict(input_fn=lambda:iris_data.eval_input_fn(predict_x, labels=None, batch_size=args.batch_size))

	template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
	for pred_dict, expec in zip(predictions, expected):
		'''
		print(pred_dict) result
		{
			'logits': array([-16.18431473, 5.61845016, 13.43036652], dtype=float32),
			'probabilities': array([1.37509145e-13, 4.04717575e-04, 9.99595344e-01], dtype=float32),
			'class_ids': array([2]),
			'classes': array([b'2'], dtype=object)
		}
		'''
		class_id = pred_dict['class_ids'][0]
		probability = pred_dict['probabilities'][class_id]

		print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
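
As the inline docstring above shows, pred_dict['classes'] carries the class id as a byte string; decoding it back to a species name could be done inside the loop like this (a sketch; it mirrors what class_ids already provides):

		# 'classes' encodes the same prediction as 'class_ids', but as bytes.
		label_id = int(pred_dict['classes'][0].decode('utf-8'))
		print(iris_data.SPECIES[label_id])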
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        model_dir='res1',
        # The model must choose between 4 classes.
        n_classes=4)

    # Train the Model.
    classifier.train(input_fn=lambda: iris_data.train_input_fn(
        train_x, train_y, args.batch_size),
                     steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(input_fn=lambda: iris_data.eval_input_fn(
        test_x, test_y, args.batch_size))

    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model
    expected = ['MSC', 'MBA', 'MCA', 'JOB']
    predict_x = {
        'Q1': [1.0, 0.0, 0.0, 0.0],
        'Q2': [1.0, 0.0, 1.0, 1.0],
        'Q3': [1.0, 1.0, 0.0, 0.0],
        'Q4': [1.0, 0.0, 0.0, 0.0],
        'Q5': [1.0, 0.0, 1.0, 0.0],
    }

    predictions = classifier.predict(input_fn=lambda: iris_data.eval_input_fn(
        predict_x, labels=None, batch_size=args.batch_size))

    template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')

    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]

        print(
            template.format(iris_data.SPECIES[class_id], 100 * probability,
                            expec))
def main(argv):
    args = parser.parse_args(argv[1:])

    # Fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        hidden_units=[10, 10],
        n_classes=3,
        model_dir="models/iris"
    )

    # Train the Model
    classifier.train(
        input_fn=lambda: iris_data.train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps
    )

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, args.batch_size)
    )

    print("\nTest set accuracy: {accuracy:0.3f}\n".format(**eval_result))

    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        "SepalLength": [5.1, 5.9, 6.9],
        "SepalWidth": [3.3, 3.0, 3.1],
        "PetalLength": [1.7, 4.2, 5.4],
        "PetalWidth": [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None, batch_size=args.batch_size)
    )

    for pred_dict, expec in zip(predictions, expected):
        template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')

        class_id = pred_dict["class_ids"][0]
        probability = pred_dict["probabilities"][class_id]

        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
def main(argv):
    args = parser.parse_args(argv[1:])

    # fetch the data
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # feature column describe how to use the input
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # build 2 hidden layer DNN with 10, 10, units respectively
    classifier = tf.estimator.Estimator(
        model_fn=my_model_fn,
        params={
            'feature_columns': my_feature_columns,
            # two hidden layers of 10 nodes each
            'hidden_units': [10, 10],
            # the model must choose between 3 classes
            'n_classes': 3
        })

    # Train the model
    classifier.train(input_fn=lambda: iris_data.train_input_fn(
        train_x, train_y, args.batch_size),
                     steps=args.train_steps)

    # Evaluate the model
    eval_result = classifier.evaluate(input_fn=lambda: iris_data.eval_input_fn(
        test_x, test_y, args.batch_size))

    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(input_fn=lambda: iris_data.eval_input_fn(
        features=predict_x, labels=None, batch_size=args.batch_size))

    for pred_dict, expec in zip(predictions, expected):
        template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(
            template.format(iris_data.SPECIES[class_id], 100 * probability,
                            expec))
Example #24
def app1():
    import iris_data

    batch_size = 100
    train_steps = 1000

    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    my_feature_columns = [
        tf.feature_column.numeric_column(key=key) for key in train_x.keys()
    ]

    classifier = tf.estimator.Estimator(model_fn=my_model,
                                        params={
                                            'feature_columns':
                                            my_feature_columns,
                                            'hidden_units': [10, 10],
                                            'n_classes': 3
                                        },
                                        model_dir='./output/d1')

    classifier.train(input_fn=lambda: iris_data.train_input_fn(
        train_x, train_y, batch_size),
                     steps=train_steps)

    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, batch_size))

    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(input_fn=lambda: iris_data.eval_input_fn(
        predict_x, labels=None, batch_size=batch_size))

    for pred_dict, expec in zip(predictions, expected):
        template = '\nPrediction is "{}" ({:.1f}%), expected "{}"'

        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]

        print(
            template.format(iris_data.SPECIES[class_id], 100 * probability,
                            expec))
Example #25
def main(_):
    # build dataset
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    # feature_column(describe how to use input)
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # estimator
    classifier = tf.estimator.DNNClassifier(
        hidden_units=[10, 10],
        feature_columns=my_feature_columns,
        n_classes=3,
    )

    classifier.train(input_fn=lambda: iris_data.train_input_fn(
        train_x, train_y, FLAGS.batch_size),
                     steps=FLAGS.train_steps)

    evaluate_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, FLAGS.
                                                 batch_size),
        steps=FLAGS.train_steps)
    # {'accuracy': 0.96666664, 'average_loss': 0.058699723, 'loss': 1.7609917, 'global_step': 1000}
    print('-' * 40)
    print('accuracy: {accuracy:0.3f}'.format(**evaluate_result))

    # Generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }
    print('-' * 40)
    predict_result = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(
            predict_x, labels=None, batch_size=FLAGS.batch_size))
    template = 'Prediction: {} probability: {:0.2f}%. Expected: {}'
    for predict_dict, expec in zip(predict_result, expected):
        # predict_dict : {
        #                 'logits': array([ 13.478667 ,   7.7477455, -21.122244 ], dtype=float32),
        #                 'probabilities': array([9.9676645e-01, 3.2335958e-03, 9.3671849e-16], dtype=float32),
        #                 'class_ids': array([0]), 'classes': array([b'0'], dtype=object)}
        class_ids = predict_dict['class_ids'][0]
        _predict = iris_data.SPECIES[class_ids]
        probability = predict_dict['probabilities'][class_ids]
        print(template.format(_predict, probability * 100, expec))
Example #26
def main(argv):
    args = parser.parse_args(argv[1:])
    batch_size = args.batch_size
    train_steps = args.train_steps

    (train_x, train_y), (test_x, test_y) = iris_data.load_data()
    # print(train_y)

    # Configure feature columns, which describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # print(my_feature_columns)

    # build estimator
    classifier = tf.estimator.DNNClassifier(
            feature_columns=my_feature_columns,
            hidden_units=[10, 10],
            n_classes=3
            )

    classifier.train(
            input_fn=lambda: iris_data.train_input_fn(train_x, train_y, batch_size),
            steps=train_steps
            )

    eval_result = classifier.evaluate(
            input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, batch_size),
            )
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # generate predictions from the model
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
            'SepalLength': [5.1, 5.9, 6.9],
            'SepalWidth': [3.3, 3.0, 3.1],
            'PetalLength': [1.7, 4.2, 5.4],
            'PetalWidth': [0.5, 1.5, 2.1],
            }
    predictions = classifier.predict(
            input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None,
                                                     batch_size=batch_size)
            )
    for pred_dict, expec in zip(predictions, expected):
        template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
        class_id = pred_dict['class_ids'][0]
        prob = pred_dict['probabilities'][class_id]

        print(template.format(iris_data.SPECIES[class_id], 100*prob, expec))
def main():
    # Load the data
    (train_x, train_y), (test_x, test_y) = load_data()

    # Define the feature columns
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Choose the model
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        hidden_units=[10, 10],  # units per hidden layer (2 hidden layers, 10 units each)
        n_classes=3,  # number of classes
        model_dir="models/iris")  # directory where the model is saved

    # Train; train_x holds the features and train_y the labels
    classifier.train(input_fn=lambda: train_input_fn(
        features=train_x, labels=train_y, batch_size=batch_size),
                     steps=train_steps)

    # Evaluate; the returned eval_result is a dict with 4 keys: accuracy, average_loss, global_step, loss
    eval_result = classifier.evaluate(input_fn=lambda: eval_input_fn(
        features=test_x, labels=test_y, batch_size=batch_size))

    print('Test set accuracy: {:0.3f}'.format(eval_result["accuracy"]))

    # Predict 3 instances
    expected = ['Setosa', 'Versicolor', 'Virginica']  # the expected classes for these 3 instances
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    # predictions contains all the predictions
    predictions = classifier.predict(input_fn=lambda: eval_input_fn(
        features=predict_x, labels=None, batch_size=batch_size))

    template = 'Prediction is "{}" ({:.1f}%), expected "{}"'  # class, probability, expected class

    # Print the prediction results
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]  # predicted label id
        probability = pred_dict['probabilities'][class_id]  # probability of that class

        print(template.format(SPECIES[class_id], 100 * probability, expec))
Example #28
def main():
    # Load the data
    (train_x, train_y), (test_x, test_y) = load_data()

    # Define the feature columns
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Custom model
    classifier = tf.estimator.Estimator(model_fn=my_model,
                                        params={
                                            'feature_columns':
                                            my_feature_columns,
                                            'hidden_units': [10, 10],
                                            'n_classes': 3,
                                        })

    # Train the model; when train() is called, the Estimator framework
    # invokes the model function with mode set to ModeKeys.TRAIN
    classifier.train(
        input_fn=lambda: train_input_fn(train_x, train_y, batch_size),  # input function
        steps=train_steps)

    # Evaluate the model
    eval_result = classifier.evaluate(
        input_fn=lambda: eval_input_fn(test_x, test_y, batch_size))

    print('Test set accuracy: {:0.3f}'.format(eval_result["accuracy"]))

    # Predict
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(input_fn=lambda: eval_input_fn(
        predict_x, labels=None, batch_size=batch_size))

    for pred_dict, expec in zip(predictions, expected):
        template = 'Prediction is "{}" ({:.1f}%), expected "{}"'

        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]

        print(template.format(SPECIES[class_id], 100 * probability, expec))
Example #29
def main(argv):
    print(argv)
    args = parser.parse_args(argv[1:])
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    print(test_x.head())
    my_feature_columns = []
    for key in train_x.keys():
        print(key)
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    my_checkpoint_config = tf.estimator.RunConfig(
        save_checkpoints_secs=20 * 60,
        keep_checkpoint_max=5,
    )
    classifier = tf.estimator.Estimator(model_fn=my_model_fn,
                                        model_dir='./models/iris',
                                        params={
                                            'feature_columns':
                                            my_feature_columns,
                                            'hidden_units': [10, 10],
                                            'n_classes': 3,
                                        })

    classifier.train(
        input_fn=lambda: train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)
    eval_result = classifier.evaluate(
        input_fn=lambda: eval_input_fn(test_x, test_y, args.batch_size))
    print('\nTest accuracy:{accuracy:0.3f}\n'.format(**eval_result))

    expected = ['Setosa', 'Versicolor', 'Virginica']
    SPECIES = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }
    predictions = classifier.predict(
        input_fn=lambda: eval_input_fn(predict_x, batch_size=args.batch_size))
    template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')
    for pred_dict, expec in zip(predictions, expected):
        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(SPECIES[class_id], 100 * probability, expec))
def main(argv):
    args = parser.parse_args(argv[1:])

    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    my_feature_columns = []
    for key in train_x:
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    classifier = tf.estimator.Estimator(model_fn=my_model,
                                        params={
                                            'feature_columns':
                                            my_feature_columns,
                                            'hidden_units': [20, 20, 20],
                                            'n_classes': 3,
                                        })

    classifier.train(input_fn=lambda: iris_data.train_input_fn(
        train_x, train_y, args.batch_size),
                     steps=args.train_steps)

    eval_result = classifier.evaluate(input_fn=lambda: iris_data.eval_input_fn(
        test_x, test_y, args.batch_size))

    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }

    predictions = classifier.predict(input_fn=lambda: iris_data.eval_input_fn(
        predict_x, labels=None, batch_size=args.batch_size))

    for pred_dict, expec in zip(predictions, expected):
        template = ('\nPrediction is "{}" ({:.1f}%), expected "{}"')

        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]

        print(
            template.format(iris_data.SPECIES[class_id], 100 * probability,
                            expec))
Example #31
def main(_):
    (train_x, train_y), (test_x, test_y) = iris_data.load_data()

    my_feature_columns = []
    # train_x.keys() returns the column names
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))
    print(my_feature_columns)
    # Create a neural network with two hidden layers, 10 nodes each
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        hidden_units=[10, 10],
        n_classes=3
    )

    # train
    classifier.train(
        input_fn=lambda :iris_data.train_input_fn(train_x, train_y, FLAGS.batch_size),
        steps=FLAGS.train_step
    )
    # evaluate
    eval_result = classifier.evaluate(
        input_fn=lambda: iris_data.eval_input_fn(test_x, test_y, FLAGS.batch_size)
    )
    #print('===========', eval_result)
    # {'accuracy': 0.96666664, 'average_loss': 0.065968044, 'loss': 1.9790413, 'global_step': 1000}
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

    # predict
    expected = ['Setosa', 'Versicolor', 'Virginica']
    predict_x = {
        'SepalLength': [5.1, 5.9, 6.9],
        'SepalWidth': [3.3, 3.0, 3.1],
        'PetalLength': [1.7, 4.2, 5.4],
        'PetalWidth': [0.5, 1.5, 2.1],
    }
    predictions = classifier.predict(
        input_fn=lambda: iris_data.eval_input_fn(predict_x, labels=None, batch_size=FLAGS.batch_size)
    )

    for pred_dict, expec in zip(predictions, expected):
        template = ('\nPrediction is "{}"({:.1f}%), expected "{}"')

        class_id = pred_dict['class_ids'][0]
        probability = pred_dict['probabilities'][class_id]
        print(template.format(iris_data.SPECIES[class_id], 100 * probability, expec))
Example #32
#coding:utf-8
import tensorflow as tf
import iris_data

# Fetch the data
(train_x, train_y), (test_x, test_y) = iris_data.load_data()

my_feature_columns = []

for key in train_x.keys():
    my_feature_columns.append(tf.feature_column.numeric_column(key=key))

def my_model(features, labels, mode, params):
    net = tf.feature_column.input_layer(features, params['feature_columns'])
    for units in params['hidden_units']:
        net = tf.layers.dense(net, units, activation=tf.nn.relu)
    logits = tf.layers.dense(net, params['n_classes'], activation=None)
    predicted_class = tf.argmax(logits, 1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions={
            'class_ids': predicted_class[:, tf.newaxis],
            'probabilities': tf.nn.softmax(logits),
            'logits': logits
        })

    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    accuracy = tf.metrics.accuracy(labels=labels, predictions=predicted_class, name='acc_op')
    metrics = {'accuracy' : accuracy}
    tf.summary.scalar('accuracy', accuracy[1])

    if mode == tf.estimator.ModeKeys.EVAL: