# Example 1 ("Esempio n. 1"; stray "0" was a scrape artifact)
def main(_):
    """Train a 2-hidden-layer DNNClassifier on MNIST and print test accuracy."""
    mnist = input_data.read_data_sets("/tmp/data")
    train_images = mnist.train.images
    train_labels = mnist.train.labels.astype("int")
    test_images = mnist.test.images
    test_labels = mnist.test.labels.astype("int")

    # Fixed seed for reproducibility; checkpoint every 10 seconds.
    run_config = RunConfig(tf_random_seed=42, save_checkpoints_secs=10)
    columns = tf.contrib.learn.infer_real_valued_columns_from_input(
        train_images)
    # Evaluate on the test split every 100 training steps.
    monitor = monitors.ValidationMonitor(x=test_images,
                                         y=test_labels,
                                         every_n_steps=100)
    classifier = DNNClassifier(
        hidden_units=[300, 100],
        n_classes=10,
        feature_columns=columns,
        config=run_config,
        model_dir="/home/mtb/Projects/machine_learning/tensorflow/mnist")

    classifier.fit(train_images,
                   train_labels,
                   batch_size=50,
                   steps=4000,
                   monitors=[monitor])
    accuracy_score = classifier.evaluate(x=test_images,
                                         y=test_labels)["accuracy"]

    print(' accuracy_score:   {0} '.format(accuracy_score))
# Example 2 ("Esempio n. 2"; stray "0" was a scrape artifact)
def train_and_eval():
    """Train and evaluate the model"""
    # Load the combined train/test CSV through tf.gfile.
    df_train_test = pd.read_csv(
        tf.gfile.Open(TRAIN_N_TEST_FILE), low_memory=False)

    # Hold out 25% of the rows for evaluation.
    df_train, df_test = train_test_split(df_train_test, test_size=0.25)

    print("\nBuilding Estimator...")
    model = build_estimator(MODEL_DIR)

    # Validation monitor with early stopping. NOTE(review): the original
    # author found that passing monitors=[val_mon] to fit() did not work,
    # so it is constructed but deliberately not passed below.
    val_mon = monitors.ValidationMonitor(
        input_fn=lambda: input_fn(df_test),
        every_n_steps=10,
        early_stopping_rounds=100)

    print("\nFitting with {} steps".format(TRAIN_STEPS))
    model.fit(
        input_fn=lambda: input_fn(df_train),
        steps=TRAIN_STEPS)

    print("\nEvaluating...")
    results = model.evaluate(
        input_fn=lambda: input_fn(df_test),
        steps=1)

    return model, results
# Example 3 ("Esempio n. 3"; stray "0" was a scrape artifact)
# Split it into train / test subsets.
# NOTE(review): sklearn's `cross_validation` module was removed in favor of
# `model_selection`; presumably an old sklearn is pinned — confirm.

X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    X, y, test_size=0.2, random_state=42)

# Split X_train again to create validation data (80/20 of the train split;
# same fixed seed keeps both splits reproducible).

X_train, X_val, y_train, y_val = cross_validation.train_test_split(
    X_train, y_train, test_size=0.2, random_state=42)

# TensorFlow model using Scikit Flow ops


def conv_model(X, y):
    """One conv layer + global spatial max pool feeding logistic regression."""
    # Append a trailing channel axis so conv2d sees a 4-D input.
    images = tf.expand_dims(X, 3)
    conv = learn.ops.conv2d(images, 12, [3, 3])
    # Max over the spatial axes leaves one value per filter.
    pooled = tf.reduce_max(conv, [1, 2])
    flat = tf.reshape(pooled, [-1, 12])
    return learn.models.logistic_regression(flat, y)


# Monitor validation performance every 50 steps during training.
val_monitor = monitors.ValidationMonitor(X_val, y_val, every_n_steps=50)
# Create a classifier, train and predict.
classifier = learn.TensorFlowEstimator(model_fn=conv_model,
                                       n_classes=10,
                                       steps=1000,
                                       learning_rate=0.05,
                                       batch_size=128)
# NOTE(review): val_monitor is passed as the third positional argument;
# confirm this API version's fit() accepts a monitor there (newer
# tf.contrib.learn signatures expect monitors=[...] as a keyword).
classifier.fit(X_train, y_train, val_monitor)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Test Accuracy: {0:f}'.format(score))
# Example 4 ("Esempio n. 4"; stray "0" was a scrape artifact)
# Split it into train / test subsets.
# NOTE(review): sklearn's `cross_validation` module was removed in favor of
# `model_selection`; presumably an old sklearn is pinned — confirm.

X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,
                                                                     test_size=0.2,
                                                                     random_state=42)

# Split X_train again to create validation data (80/20 of the train split;
# same fixed seed keeps both splits reproducible).

X_train, X_val, y_train, y_val = cross_validation.train_test_split(X_train,
                                                                   y_train,
                                                                   test_size=0.2,
                                                                   random_state=42)

# TensorFlow model using Scikit Flow ops


def conv_model(X, y):
    """One conv layer + global spatial max pool feeding logistic regression."""
    # conv2d needs a 4-D tensor; add the missing channel dimension.
    with_channels = tf.expand_dims(X, 3)
    activations = learn.ops.conv2d(with_channels, 12, [3, 3])
    # Reduce each of the 12 feature maps to its spatial maximum.
    max_features = tf.reshape(tf.reduce_max(activations, [1, 2]), [-1, 12])
    return learn.models.logistic_regression(max_features, y)

# NOTE(review): this uses the older skflow-style ValidationMonitor signature
# (n_classes / print_steps) rather than every_n_steps — confirm the pinned
# library version actually accepts these keywords.
val_monitor = monitors.ValidationMonitor(X_val, y_val, n_classes=10, print_steps=50)
# Create a classifier, train and predict.
classifier = learn.TensorFlowEstimator(model_fn=conv_model, n_classes=10,
                                        steps=1000, learning_rate=0.05,
                                        batch_size=128)
# NOTE(review): val_monitor is passed positionally; confirm fit()'s third
# positional parameter accepts a monitor in this API version.
classifier.fit(X_train, y_train, val_monitor)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Test Accuracy: {0:f}'.format(score))
def main(unused_argv):
    """Train the CNN, either via tf.contrib.learn.Experiment (with a
    validation monitor and early stopping) or via a plain tf.estimator
    train/evaluate loop, selected by FLAGS.run_experiment.
    """
    # Quiet TF logging during setup; each branch re-raises verbosity as needed.
    tf.logging.set_verbosity(logging.ERROR)

    # Session configuration: grow GPU memory on demand and allow TF to fall
    # back to other devices when a placement is impossible.
    gpu_options = tf.GPUOptions(allow_growth=True)
    sess_config = tf.ConfigProto(gpu_options=gpu_options)
    sess_config.allow_soft_placement = True
    sess_config.log_device_placement = FLAGS.log_device_placement

    if FLAGS.run_experiment:
        # Experiment path: contrib RunConfig, checkpoint every 100 steps.
        # NOTE(review): sess_config is NOT applied on this branch (the
        # replace call is commented out below) — confirm that is intended.
        config = tf.contrib.learn.RunConfig(model_dir=FLAGS.train_dir,save_checkpoints_steps=100)
        # config = config.replace(sess_config=sess_config)
        tf.logging.set_verbosity(logging.INFO)
        classifier = tf.estimator.Estimator(
            model_fn=model_fn_cnn,
            model_dir= FLAGS.train_dir,
            config=config
        )
        # Periodic evaluation on the "evaluation" subset with early stopping
        # on accuracy after 1000 rounds without improvement.
        validation_monitor = monitors.ValidationMonitor(
            input_fn=functools.partial(input_fn, subset="evaluation"),
            eval_steps=128,
            every_n_steps=88,
            early_stopping_metric="accuracy",
            early_stopping_rounds = 1000
        )
        hooks = [ validation_monitor]
        # you can use both core Estimator or contrib Estimator
        # contrib_classifier = contrib_estimator.Estimator(
        #                 model_fn=model_fn_cnn_experiment,
        #                 model_dir=FLAGS.train_dir,
        #                 config=config
        #                 )
        experiment = tf.contrib.learn.Experiment(
            classifier,
            train_input_fn=functools.partial(input_fn, subset="training"),
            eval_input_fn=functools.partial(input_fn, subset="evaluation"),
            train_steps=FLAGS.train_steps,
            eval_steps=100,
            min_eval_frequency=80,
            train_monitors=hooks
            # eval_metrics="accuracy"
        )
        experiment.train_and_evaluate()
    else:
        # Plain-estimator path: time training and report examples/sec.
        start_time = datetime.datetime.now()
        config = tf.estimator.RunConfig()
        config = config.replace(session_config=sess_config)
        per_example_hook = ExamplesPerSecondHook(FLAGS.train_batch_size, every_n_steps=100)
        hooks = [per_example_hook]
        classifier = tf.estimator.Estimator(
            model_fn=model_fn_cnn,
            model_dir= FLAGS.train_dir,
            config=config
        )
        classifier.train(input_fn=functools.partial(input_fn,subset="training"),
                         steps=FLAGS.train_steps,
                         hooks=hooks
                         )

        train_time = datetime.datetime.now() - start_time
        print("Training complete in : minutes:{}".format(train_time.total_seconds()/60))
        tf.logging.set_verbosity(logging.WARNING)
        print('Evaluation on test data...')
        # Evaluate on both subsets (100 batches each) to expose overfitting.
        eval_results = classifier.evaluate(
            input_fn=functools.partial(input_fn,subset="evaluation"),
            steps=100)
        print(eval_results)

        print("Evaluation on training data...")
        eval_results = classifier.evaluate(
            input_fn=functools.partial(input_fn,subset="training"),
            steps=100)
        print(eval_results)