def testIrisES(self):
    """Early stopping via a ValidationMonitor should not hurt test accuracy.

    Trains two identical DNN classifiers on Iris — one plain, one with a
    validation monitor that can stop training early — and checks that the
    early-stopped model scores higher on the held-out test set.
    """
    random.seed(42)
    iris = datasets.load_iris()
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=42)
    # Carve a validation set out of the training data for the monitor.
    X_train, X_val, y_train, y_val = train_test_split(
        X_train, y_train, test_size=0.2)
    val_monitor = skflow.monitors.ValidationMonitor(X_val, y_val, n_classes=3)
    # classifier without early stopping - overfitting
    classifier1 = skflow.TensorFlowDNNClassifier(
        hidden_units=[10, 20, 10], n_classes=3, steps=1000)
    classifier1.fit(X_train, y_train)
    score1 = metrics.accuracy_score(y_test, classifier1.predict(X_test))
    # classifier with early stopping - improved accuracy on testing set
    classifier2 = skflow.TensorFlowDNNClassifier(
        hidden_units=[10, 20, 10], n_classes=3, steps=1000)
    classifier2.fit(X_train, y_train, val_monitor)
    score2 = metrics.accuracy_score(y_test, classifier2.predict(X_test))
    # FIX: the original computed both scores but asserted nothing, so this
    # test could never fail. Assert that early stopping actually helped.
    self.assertGreater(
        score2, score1,
        "No accuracy improvement with early stopping: {0} vs {1}".format(
            score2, score1))
def testIrisMomentum(self):
    """Train an Iris DNN with a Momentum optimizer and a decaying learning rate."""
    random.seed(42)
    iris = datasets.load_iris()
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=42)

    # Learning rate decays exponentially with the training step.
    def _decayed_lr(global_step):
        return tf.train.exponential_decay(
            learning_rate=0.1, global_step=global_step,
            decay_steps=100, decay_rate=0.001)

    # Custom optimizer factory: Momentum with coefficient 0.9.
    def _momentum_optimizer(learning_rate):
        return tf.train.MomentumOptimizer(learning_rate, 0.9)

    classifier = skflow.TensorFlowDNNClassifier(
        hidden_units=[10, 20, 10], n_classes=3, steps=800,
        learning_rate=_decayed_lr, optimizer=_momentum_optimizer)
    classifier.fit(X_train, y_train)
    score = metrics.accuracy_score(y_test, classifier.predict(X_test))
    self.assertGreater(score, 0.7, "Failed with score = {0}".format(score))
def testDNNDropout0_1(self):
    """A small (10%) dropout should barely affect Iris training accuracy."""
    iris = datasets.load_iris()
    # Dropping only a little.
    classifier = skflow.TensorFlowDNNClassifier(
        hidden_units=[10, 20, 10], n_classes=3, dropout=0.1)
    classifier.fit(iris.data, iris.target)
    predictions = classifier.predict(iris.data)
    score = accuracy_score(iris.target, predictions)
    self.assertGreater(score, 0.9, "Failed with score = {0}".format(score))
def testNoCheckpoints(self):
    """Restoring a saved model whose checkpoint file was deleted raises ValueError."""
    path = tf.test.get_temp_dir() + '/tmp/tmp.saver4'
    random.seed(42)
    iris = datasets.load_iris()
    model = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                           n_classes=3)
    model.fit(iris.data, iris.target)
    model.save(path)
    # Remove the checkpoint index so the saved model becomes unrecoverable.
    os.remove(os.path.join(path, 'checkpoint'))
    with self.assertRaises(ValueError):
        skflow.TensorFlowEstimator.restore(path)
def testDNN(self):
    """Save a trained DNN classifier and verify the restored copy still predicts."""
    path = tf.test.get_temp_dir() + '/tmp_saver3'
    random.seed(42)
    iris = datasets.load_iris()
    original = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                              n_classes=3)
    original.fit(iris.data, iris.target)
    original.save(path)
    restored = skflow.TensorFlowEstimator.restore(path)
    # Restore must round-trip the concrete estimator class.
    self.assertEqual(type(restored), type(original))
    score = accuracy_score(iris.target, restored.predict(iris.data))
    self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def testIrisDNNWithGridSearch(self):
    """Grid-search DNN hyper-parameters on Iris via sklearn's GridSearchCV.

    FIX: renamed from ``testIrisDNN`` — the class defines another method
    with that exact name later in the file, so this definition was silently
    shadowed and the grid-search test never ran under the test runner.
    """
    if HAS_SKLEARN:
        random.seed(42)
        iris = datasets.load_iris()
        classifier = skflow.TensorFlowDNNClassifier(
            hidden_units=[10, 20, 10], n_classes=3, steps=50)
        grid_search = GridSearchCV(
            classifier,
            {'hidden_units': [[5, 5], [10, 10]],
             'learning_rate': [0.1, 0.01]})
        grid_search.fit(iris.data, iris.target)
        score = accuracy_score(iris.target, grid_search.predict(iris.data))
        self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def testIrisDNN(self):
    """Fit a 3-layer DNN on Iris and sanity-check accuracy, weights, and biases."""
    random.seed(42)
    iris = datasets.load_iris()
    classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
                                                n_classes=3)
    classifier.fit(iris.data, iris.target)
    score = accuracy_score(iris.target, classifier.predict(iris.data))
    self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
    # Layer weight matrices: 4 features -> 10 -> 20 -> 10 -> 3 classes.
    expected_shapes = [(4, 10), (10, 20), (20, 10), (10, 3)]
    weights = classifier.weights_
    for layer, shape in enumerate(expected_shapes):
        self.assertEqual(weights[layer].shape, shape)
    biases = classifier.bias_
    self.assertEqual(len(biases), 4)