def test_input_fn(self):
    """Check that input_fn yields the expected single-row features and label.

    Builds a one-epoch, unshuffled, batch-size-1 dataset from the test CSV
    and compares the first (only) example against TEST_INPUT_VALUES.
    """
    dataset = census_dataset.input_fn(self.input_csv, 1, False, 1)
    features, labels = dataset.make_one_shot_iterator().get_next()

    with self.test_session() as sess:
        features, labels = sess.run((features, labels))

        # Compare the two features dictionaries.
        for key in TEST_INPUT_VALUES:
            # assertIn gives a readable failure message, unlike
            # assertTrue(key in features).
            self.assertIn(key, features)
            # Batch size is 1, so each feature column holds exactly one value.
            self.assertEqual(len(features[key]), 1)
            feature_value = features[key][0]

            # Convert from bytes to string for Python 3.
            if isinstance(feature_value, bytes):
                feature_value = feature_value.decode()

            self.assertEqual(TEST_INPUT_VALUES[key], feature_value)

        # The test row encodes the negative class, so the label must be falsy.
        self.assertFalse(labels)
def eval_input_fn():
    """Build the evaluation dataset: one pass over test_file, unshuffled."""
    # Keyword arguments document the (num_epochs, shuffle, batch_size) tail,
    # matching the style of the sibling input-fn wrappers.
    return census_dataset.input_fn(
        data_file=test_file, num_epochs=1, shuffle=False,
        batch_size=flags_obj.batch_size)
def train_input_fn():
    """Build the shuffled training dataset spanning the evaluation interval."""
    # Keyword arguments document the (num_epochs, shuffle, batch_size) tail,
    # matching the style of the sibling input-fn wrappers.
    return census_dataset.input_fn(
        data_file=train_file, num_epochs=flags_obj.epochs_between_evals,
        shuffle=True, batch_size=flags_obj.batch_size)
def input_fn():
    """Wrap census_dataset.input_fn over TEST_CSV with the captured settings."""
    return census_dataset.input_fn(
        TEST_CSV,
        num_epochs=num_epochs,
        shuffle=shuffle,
        batch_size=batch_size,
    )
def eval_input_fn():
    """Return a single-epoch, unshuffled dataset over the evaluation file."""
    return census_dataset.input_fn(
        data_file=test_file,
        num_epochs=1,
        shuffle=False,
        batch_size=flags_obj.batch_size,
    )
def train_input_fn():
    """Return a shuffled training dataset covering epochs_between_evals epochs."""
    return census_dataset.input_fn(
        data_file=train_file,
        num_epochs=flags_obj.epochs_between_evals,
        shuffle=True,
        batch_size=flags_obj.batch_size,
    )
def eval_input_fn():
    """Build the evaluation dataset: epochs_between_evals passes, unshuffled."""
    # Keyword arguments document the (num_epochs, shuffle, batch_size) tail,
    # matching the style of the sibling input-fn wrappers.
    return census_dataset.input_fn(
        data_file=test_file, num_epochs=flags_obj.epochs_between_evals,
        shuffle=False, batch_size=flags_obj.batch_size)