def main():
    """Fit a supervised sentiment classifier on the padded IMDB dataset."""
    # Load the pickled IMDB corpus, capped at a 10k-word vocabulary.
    train_split, test_split, _ = imdb.load_data(path='imdb.pkl', n_words=10000, valid_portion=0.1)
    features_train, labels_train = train_split
    features_test, labels_test = test_split

    # Zero-pad every review to a fixed length of 100 tokens.
    features_train = pad_sequences(features_train, maxlen=100, value=0.)
    features_test = pad_sequences(features_test, maxlen=100, value=0.)
    labels_train = np.asarray(labels_train)
    labels_test = np.asarray(labels_test)

    # NOTE(review): the test split is used as the validation set here — confirm intended.
    data_set = DataSet(features_train, labels_train, features_test, labels_test)

    training_cnf = {
        'classification': True,
        'validation_scores': [('validation accuracy', util.accuracy_tf)],
        'num_epochs': 50,
        'input_size': (100,),
        # 10x learning-rate drop at epoch 30.
        'lr_policy': StepDecayPolicy(schedule={0: 0.01, 30: 0.001}),
    }

    util.init_logging('train.log', file_log_level=logging.INFO, console_log_level=logging.INFO)

    learner = SupervisedLearner(
        model, training_cnf, classification=training_cnf['classification'], is_summary=False)
    learner.fit(data_set, weights_from=None, start_epoch=1)
def train():
    """Fit a model on MNIST, reporting validation accuracy and kappa."""
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
    side = 28  # MNIST digits are 28x28, single channel

    # mnist[0] is the training split, mnist[1] the validation split;
    # flat 784-pixel rows are reshaped into NHWC image tensors.
    images_train = mnist[0].images.reshape(-1, side, side, 1)
    labels_train = mnist[0].labels
    images_val = mnist[1].images.reshape(-1, side, side, 1)
    labels_val = mnist[1].labels
    data_set = DataSet(images_train, labels_train, images_val, labels_val)

    training_cnf = {
        'classification': True,
        'validation_scores': [('validation accuracy', util.accuracy_wrapper),
                              ('validation kappa', util.kappa_wrapper)],
        'num_epochs': 50,
        # 10x learning-rate drop at epoch 30.
        'lr_policy': StepDecayPolicy(schedule={0: 0.01, 30: 0.001}),
    }

    util.init_logging('train.log', file_log_level=logging.INFO, console_log_level=logging.INFO)

    trainer = SupervisedTrainer(
        model, training_cnf, classification=training_cnf['classification'], is_summary=True)
    trainer.fit(data_set, weights_from=None, start_epoch=1, verbose=1, summary_every=10)
def train():
    """Train a 10-class MNIST model with TF metric-based validation scoring."""
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
    dim = 28  # MNIST digits are 28x28, single channel

    # Reshape flat pixel rows into NHWC tensors; split 0 trains, split 1 validates.
    train_x = mnist[0].images.reshape(-1, dim, dim, 1)
    train_y = mnist[0].labels
    val_x = mnist[1].images.reshape(-1, dim, dim, 1)
    val_y = mnist[1].labels
    data_set = DataSet(train_x, train_y, val_x, val_y)

    training_cnf = {
        'classification': True,
        # NOTE(review): tf.metrics.* return (value, update_op) pairs — confirm
        # SupervisedLearner supports streaming metrics as validation scores.
        'validation_scores': [('accuracy', tf.metrics.accuracy),
                              ('kappa', tf.contrib.metrics.cohen_kappa)],
        'num_epochs': 50,
        'batch_size_train': 32,
        'batch_size_test': 32,
        'input_size': (28, 28, 1),
        # 10x learning-rate drop at epoch 30.
        'lr_policy': StepDecayPolicy(schedule={0: 0.01, 30: 0.001}),
    }

    learner = SupervisedLearner(
        model,
        training_cnf,
        classification=training_cnf['classification'],
        is_summary=True,
        num_classes=10)
    learner.fit(data_set, weights_from=None, start_epoch=1, summary_every=10)
# NOTE(review): interior of a training-config dict — the enclosing `{` /
# assignment lies outside this view; keys are documented in place.
'iterator_type': 'queued',  # parallel or queued
'batch_size_train': 16,
'batch_size_test': 16,
'l2_reg': 0.002,  # L2 weight-decay coefficient
# Augmentation ranges; presumably applied to training images only — verify.
'aug_params': {
    'zoom_range': (1 / 1.05, 1.05),
    'rotation_range': (-5, 5),
    'shear_range': (0, 0),
    'translation_range': (-20, 20),
    'do_flip': True,
    'allow_stretch': True,
},
'num_epochs': 100,
'summary_every': 5,
# 10x learning-rate drop at epoch 15.
'lr_policy': StepDecayPolicy(schedule={
    0: 0.001,
    15: 0.0001,
}),
'optimizer': tf.train.AdamOptimizer(),
'validation_scores': [('validation accuracy', util.accuracy_wrapper),
                      ('validation kappa', util.kappa_wrapper)],
}
# NOTE(review): the three lines below are the tail of a model-builder
# function whose header and earlier layers lie outside this view.
logits = fully_connected(x, n_output=10, name="logits", **logit_args)
predictions = softmax(logits, name='predictions', **common_args)
return end_points(is_training)

# Training configuration and driver for the model above.
training_cnf = {
    'classification': True,
    'validation_scores': [('validation accuracy', util.accuracy_wrapper),
                          ('validation kappa', util.kappa_wrapper)],
    'num_epochs': 50,
    # 10x learning-rate drop at epoch 30.
    'lr_policy': StepDecayPolicy(schedule={
        0: 0.01,
        30: 0.001,
    })
}
util.init_logging('train.log', file_log_level=logging.INFO, console_log_level=logging.INFO)

# NOTE(review): `data_set` is not defined in this fragment — presumably built
# earlier in the full file; verify before running standalone.
trainer = SupervisedTrainer(model, training_cnf, classification=training_cnf['classification'])
trainer.fit(data_set, weights_from=None, start_epoch=1, verbose=1, summary_every=10)
# NOTE(review): interior of a training-config dict — the stray `},` closes a
# nested value whose opening lies outside this view.
},
# Per-channel standardization constants (presumably RGB mean/std measured on
# the training set — verify); u/ev look like lighting-augmentation PCA
# components with noise scale sigma — confirm against AggregateStandardizer docs.
'standardizer': AggregateStandardizer(
    mean=np.array([108.64628601, 75.86886597, 54.34005737], dtype=np.float32),
    std=np.array([70.53946096, 51.71475228, 43.03428563], dtype=np.float32),
    u=np.array([[-0.56543481, 0.71983482, 0.40240142],
                [-0.5989477, -0.02304967, -0.80036049],
                [-0.56694071, -0.6935729, 0.44423429]], dtype=np.float32),
    ev=np.array([1.65513492, 0.48450358, 0.1565086], dtype=np.float32),
    sigma=0.5),
'num_epochs': 555,
# 'lr_policy': PolyDecayPolicy(0.00005),
# Mostly-flat schedule: 0.0002 until epoch 500, then 0.0001.
'lr_policy': StepDecayPolicy({
    0: 0.0002,
    100: 0.0002,
    200: 0.0002,
    400: 0.0002,
    500: 0.0001
}),
'classification': True,
'validation_scores': [('validation accuracy', util.accuracy_wrapper),
                      ('validation kappa', util.kappa_wrapper)],
# 'validation_scores': [('validation kappa', util.kappa_wrapper)],
}
from tefla.core.lr_policy import StepDecayPolicy
from tefla.utils import util

# Training configuration; 'name' is derived from this module's filename.
cnf = {
    'name': __name__.split('.')[-1],
    'classification': True,
    'iterator_type': 'queued',  # parallel or queued
    'batch_size_train': 16,
    'batch_size_test': 16,
    'l2_reg': 0.005,  # L2 weight-decay coefficient
    # Augmentation ranges applied when iterating the dataset.
    'aug_params': {
        'zoom_range': (1 / 1.1, 1.1),
        'rotation_range': (-5, 5),
        'shear_range': (1, 1),
        'translation_range': (-20, 20),
        'do_flip': True,
        'allow_stretch': True,
    },
    'num_epochs': 15,
    'summary_every': 5,
    # 10x learning-rate drop at epoch 10.
    'lr_policy': StepDecayPolicy(schedule={0: 0.001, 10: 0.0001}),
    'optimizer': tf.train.AdamOptimizer(),
    'validation_scores': [('validation accuracy', util.accuracy_wrapper),
                          ('validation kappa', util.kappa_wrapper)],
}