def create_experiment_json_fn(output_dir):
    """Creates an experiment using the LeNet network for MNIST dataset classification task."""
    dataset_dir = './data/mnist'
    mnist.prepare(dataset_dir)
    train_data_file = mnist.RECORD_FILE_NAME_FORMAT.format(
        dataset_dir, plx.Modes.TRAIN)
    eval_data_file = mnist.RECORD_FILE_NAME_FORMAT.format(
        dataset_dir, plx.Modes.EVAL)
    meta_data_file = mnist.MEAT_DATA_FILENAME_FORMAT.format(dataset_dir)

    config = {
        'name': 'lenet_mnist',
        'output_dir': output_dir,
        'eval_every_n_steps': 10,
        'train_steps_per_iteration': 100,
        'run_config': {'save_checkpoints_steps': 100},
        'train_input_data_config': {
            'pipeline_config': {'module': 'TFRecordImagePipeline',
                                'batch_size': 64,
                                'num_epochs': 1,
                                'shuffle': True,
                                'dynamic_pad': False,
                                'params': {'data_files': train_data_file,
                                           'meta_data_file': meta_data_file}},
        },
        'eval_input_data_config': {
            'pipeline_config': {'module': 'TFRecordImagePipeline',
                                'batch_size': 32,
                                'num_epochs': 1,
                                'shuffle': True,
                                'dynamic_pad': False,
                                'params': {'data_files': eval_data_file,
                                           'meta_data_file': meta_data_file}},
        },
        'estimator_config': {'output_dir': output_dir},
        'model_config': {
            'summaries': 'all',
            'module': 'Classifier',
            'loss_config': {'module': 'softmax_cross_entropy'},
            'eval_metrics_config': [{'module': 'streaming_accuracy'},
                                    {'module': 'streaming_precision'}],
            'optimizer_config': {'module': 'adam', 'learning_rate': 0.07,
                                 'decay_type': 'exponential_decay', 'decay_rate': 0.2},
            'one_hot_encode': True,
            'n_classes': 10,
            'graph_config': {
                'name': 'lenet',
                'features': ['image'],
                'definition': [
                    (plx.layers.Conv2d,
                     {'num_filter': 32, 'filter_size': 5, 'strides': 1,
                      'regularizer': 'l2_regularizer'}),
                    (plx.layers.MaxPool2d, {'kernel_size': 2}),
                    (plx.layers.Conv2d,
                     {'num_filter': 64, 'filter_size': 5,
                      'regularizer': 'l2_regularizer'}),
                    (plx.layers.MaxPool2d, {'kernel_size': 2}),
                    (plx.layers.FullyConnected,
                     {'num_units': 1024, 'activation': 'tanh'}),
                    (plx.layers.FullyConnected, {'num_units': 10}),
                ]
            }
        }
    }
    experiment_config = plx.configs.ExperimentConfig.read_configs(config)
    return plx.experiments.create_experiment(experiment_config)
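# A minimal sketch of how such an experiment function is typically launched.
# It assumes `plx.experiments.run_experiment`, a helper mirroring
# tf.contrib.learn's learn_runner API that Polyaxon examples tend to use;
# the OUTPUT_DIR constant is hypothetical.
import polyaxon as plx
import tensorflow as tf

OUTPUT_DIR = '/tmp/polyaxon_logs/lenet_mnist'  # hypothetical log/checkpoint path


def main(*args):
    plx.experiments.run_experiment(experiment_fn=create_experiment_json_fn,
                                   output_dir=OUTPUT_DIR,
                                   schedule='continuous_train_and_eval')


if __name__ == '__main__':
    tf.app.run()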
def create_experiment_json_fn(output_dir): """Creates an experiment using cnn for MNIST dataset classification task. References: * Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based learning applied to document recognition." Proceedings of the IEEE, 86(11):2278-2324, November 1998. Links: * [MNIST Dataset] http://yann.lecun.com/exdb/mnist/ """ dataset_dir = './data/mnist' mnist.prepare(dataset_dir) train_data_file = mnist.RECORD_FILE_NAME_FORMAT.format( dataset_dir, plx.Modes.TRAIN) eval_data_file = mnist.RECORD_FILE_NAME_FORMAT.format( dataset_dir, plx.Modes.EVAL) meta_data_file = mnist.MEAT_DATA_FILENAME_FORMAT.format(dataset_dir) config = { 'name': 'conv_mnsit', 'output_dir': output_dir, 'eval_every_n_steps': 5, 'run_config': { 'save_checkpoints_steps': 100 }, 'train_input_data_config': { 'pipeline_config': { 'module': 'TFRecordImagePipeline', 'batch_size': 64, 'num_epochs': 5, 'shuffle': True, 'dynamic_pad': False, 'params': { 'data_files': train_data_file, 'meta_data_file': meta_data_file } }, }, 'eval_input_data_config': { 'pipeline_config': { 'module': 'TFRecordImagePipeline', 'batch_size': 32, 'num_epochs': 1, 'shuffle': True, 'dynamic_pad': False, 'params': { 'data_files': eval_data_file, 'meta_data_file': meta_data_file } }, }, 'estimator_config': { 'output_dir': output_dir }, 'model_config': { 'module': 'Classifier', 'loss_config': { 'module': 'sigmoid_cross_entropy' }, 'eval_metrics_config': [{ 'module': 'streaming_accuracy' }], 'optimizer_config': { 'module': 'adam', 'learning_rate': 0.001 }, 'one_hot_encode': True, 'n_classes': 10, 'graph_config': { 'name': 'convnet', 'features': ['image'], 'definition': [ ('Conv2d', { 'num_filter': 32, 'filter_size': 3, 'strides': 1, 'activation': 'elu', 'regularizer': 'l2_regularizer' }), ('MaxPool2d', { 'kernel_size': 2 }), ('LocalResponseNormalization', {}), ('Conv2d', { 'num_filter': 64, 'filter_size': 3, 'activation': 'relu', 'regularizer': 'l2_regularizer' }), ('MaxPool2d', { 'kernel_size': 2 }), ('LocalResponseNormalization', {}), ('FullyConnected', { 'num_units': 128, 'activation': 'tanh' }), ('Dropout', { 'keep_prob': 0.8 }), ('FullyConnected', { 'num_units': 256, 'activation': 'tanh' }), ('Dropout', { 'keep_prob': 0.8 }), ('FullyConnected', { 'num_units': 10 }), ] } } } experiment_config = plx.configs.ExperimentConfig.read_configs(config) return plx.experiments.create_experiment(experiment_config)
def create_experiment_json_fn(output_dir): """Creates an experiment using cnn for MNIST dataset classification task.""" dataset_dir = './data/mnist' mnist.prepare(dataset_dir) train_data_file = mnist.RECORD_FILE_NAME_FORMAT.format( dataset_dir, plx.Modes.TRAIN) eval_data_file = mnist.RECORD_FILE_NAME_FORMAT.format( dataset_dir, plx.Modes.EVAL) meta_data_file = mnist.MEAT_DATA_FILENAME_FORMAT.format(dataset_dir) config = { 'name': 'conv_highway', 'output_dir': output_dir, 'eval_every_n_steps': 5, 'run_config': { 'save_checkpoints_steps': 100 }, 'train_input_data_config': { 'pipeline_config': { 'module': 'TFRecordImagePipeline', 'batch_size': 64, 'num_epochs': 5, 'shuffle': True, 'dynamic_pad': False, 'params': { 'data_files': train_data_file, 'meta_data_file': meta_data_file } }, }, 'eval_input_data_config': { 'pipeline_config': { 'module': 'TFRecordImagePipeline', 'batch_size': 32, 'num_epochs': 1, 'shuffle': True, 'dynamic_pad': False, 'params': { 'data_files': eval_data_file, 'meta_data_file': meta_data_file } }, }, 'estimator_config': { 'output_dir': output_dir }, 'model_config': { 'module': 'Classifier', 'loss_config': { 'module': 'softmax_cross_entropy' }, 'eval_metrics_config': [{ 'module': 'streaming_accuracy' }], 'optimizer_config': { 'module': 'adam', 'learning_rate': 0.001 }, 'one_hot_encode': True, 'n_classes': 10, 'graph_config': { 'name': 'mnist', 'features': ['image'], 'definition': [ (plx.layers.HighwayConv2d, { 'num_filter': 32, 'filter_size': 3, 'strides': 1, 'activation': 'elu' }), (plx.layers.HighwayConv2d, { 'num_filter': 16, 'filter_size': 2, 'strides': 1, 'activation': 'elu' }), (plx.layers.HighwayConv2d, { 'num_filter': 16, 'filter_size': 1, 'strides': 1, 'activation': 'elu' }), (plx.layers.MaxPool2d, { 'kernel_size': 2 }), (plx.layers.BatchNormalization, {}), (plx.layers.FullyConnected, { 'num_units': 128, 'activation': 'elu' }), (plx.layers.FullyConnected, { 'num_units': 256, 'activation': 'elu' }), (plx.layers.FullyConnected, { 'num_units': 10 }), ] } } } experiment_config = plx.configs.ExperimentConfig.read_configs(config) return plx.experiments.create_experiment(experiment_config)
def create_experiment_json_fn(output_dir): """Creates an experiement using a VGG19 to Oxford's 17 Category Flower Dataset. References: * Very Deep Convolutional Networks for Large-Scale Image Recognition. K. Simonyan, A. Zisserman. arXiv technical report, 2014. Links: * http://arxiv.org/pdf/1409.1556 """ dataset_dir = './data/mnist' mnist.prepare(dataset_dir) train_data_file = mnist.RECORD_FILE_NAME_FORMAT.format( dataset_dir, plx.Modes.TRAIN) eval_data_file = mnist.RECORD_FILE_NAME_FORMAT.format( dataset_dir, plx.Modes.EVAL) meta_data_file = mnist.MEAT_DATA_FILENAME_FORMAT.format(dataset_dir) config = { 'name': 'vgg19', 'output_dir': output_dir, 'eval_every_n_steps': 10, 'train_steps_per_iteration': 100, 'run_config': { 'save_checkpoints_steps': 100 }, 'train_input_data_config': { 'pipeline_config': { 'module': 'TFRecordImagePipeline', 'batch_size': 64, 'num_epochs': 1, 'shuffle': True, 'dynamic_pad': False, 'params': { 'data_files': train_data_file, 'meta_data_file': meta_data_file } }, }, 'eval_input_data_config': { 'pipeline_config': { 'module': 'TFRecordImagePipeline', 'batch_size': 32, 'num_epochs': 1, 'shuffle': True, 'dynamic_pad': False, 'params': { 'data_files': eval_data_file, 'meta_data_file': meta_data_file } }, }, 'estimator_config': { 'output_dir': output_dir }, 'model_config': { 'module': 'Classifier', 'summaries': ['loss'], 'loss_config': { 'module': 'softmax_cross_entropy' }, 'eval_metrics_config': [{ 'module': 'streaming_accuracy' }, { 'module': 'streaming_precision' }], 'optimizer_config': { 'module': 'adam', 'learning_rate': 0.007, 'decay_type': 'exponential_decay', 'decay_rate': 0.2 }, 'one_hot_encode': True, 'n_classes': 17, 'graph_config': { 'name': 'vgg', 'features': ['image'], 'definition': [ (plx.layers.Conv2d, { 'num_filter': 64, 'filter_size': 3, 'activation': 'relu' }), (plx.layers.Conv2d, { 'num_filter': 64, 'filter_size': 3, 'activation': 'relu' }), (plx.layers.MaxPool2d, { 'kernel_size': 2, 'strides': 2 }), (plx.layers.Conv2d, { 'num_filter': 128, 'filter_size': 3, 'activation': 'relu' }), (plx.layers.Conv2d, { 'num_filter': 128, 'filter_size': 3, 'activation': 'relu' }), (plx.layers.MaxPool2d, { 'kernel_size': 2, 'strides': 2 }), (plx.layers.Conv2d, { 'num_filter': 256, 'filter_size': 3, 'activation': 'relu' }), (plx.layers.Conv2d, { 'num_filter': 256, 'filter_size': 3, 'activation': 'relu' }), (plx.layers.Conv2d, { 'num_filter': 256, 'filter_size': 3, 'activation': 'relu' }), (plx.layers.MaxPool2d, { 'kernel_size': 2, 'strides': 2 }), (plx.layers.Conv2d, { 'num_filter': 512, 'filter_size': 3, 'activation': 'relu' }), (plx.layers.Conv2d, { 'num_filter': 512, 'filter_size': 3, 'activation': 'relu' }), (plx.layers.Conv2d, { 'num_filter': 512, 'filter_size': 3, 'activation': 'relu' }), (plx.layers.MaxPool2d, { 'kernel_size': 2, 'strides': 2 }), (plx.layers.FullyConnected, { 'num_units': 4096, 'activation': 'relu', 'dropout': 0.5 }), (plx.layers.FullyConnected, { 'num_units': 4096, 'activation': 'relu', 'dropout': 0.5 }), (plx.layers.FullyConnected, { 'num_units': 17 }), ] } } } experiment_config = plx.configs.ExperimentConfig.read_configs(config) return plx.experiments.create_experiment(experiment_config)
def create_experiment_json_fn(output_dir): """Creates an auto encoder on MNIST handwritten digits. inks: * [MNIST Dataset] http://yann.lecun.com/exdb/mnist/ """ dataset_dir = './data/mnist' mnist.prepare(dataset_dir) train_data_file = mnist.RECORD_FILE_NAME_FORMAT.format( dataset_dir, plx.Modes.TRAIN) eval_data_file = mnist.RECORD_FILE_NAME_FORMAT.format( dataset_dir, plx.Modes.EVAL) meta_data_file = mnist.MEAT_DATA_FILENAME_FORMAT.format(dataset_dir) config = { 'name': 'autoencoder_mnsit', 'output_dir': output_dir, 'eval_every_n_steps': 100, 'train_steps_per_iteration': 100, 'run_config': { 'save_checkpoints_steps': 100 }, 'train_input_data_config': { 'pipeline_config': { 'module': 'TFRecordImagePipeline', 'batch_size': 64, 'num_epochs': 10, 'shuffle': True, 'dynamic_pad': False, 'params': { 'data_files': train_data_file, 'meta_data_file': meta_data_file }, 'definition': { 'image': [ (plx.processing.image.Standardization, {}), (plx.layers.Reshape, { 'new_shape': [28 * 28] }), ] } }, }, 'eval_input_data_config': { 'pipeline_config': { 'module': 'TFRecordImagePipeline', 'batch_size': 32, 'num_epochs': 1, 'shuffle': True, 'dynamic_pad': False, 'params': { 'data_files': eval_data_file, 'meta_data_file': meta_data_file }, 'definition': { 'image': [(plx.processing.image.Standardization, {}), (plx.layers.Reshape, { 'new_shape': [28 * 28] })] } }, }, 'estimator_config': { 'output_dir': output_dir }, 'model_config': { 'module': 'Generator', 'summaries': ['loss'], 'loss_config': { 'module': 'mean_squared_error' }, 'optimizer_config': { 'module': 'adadelta', 'learning_rate': 0.9 }, 'encoder_config': { 'definition': [ (plx.layers.FullyConnected, { 'num_units': 128 }), (plx.layers.FullyConnected, { 'num_units': 256 }), ] }, 'decoder_config': { 'definition': [ (plx.layers.FullyConnected, { 'num_units': 256 }), (plx.layers.FullyConnected, { 'num_units': 28 * 28 }), ] } } } experiment_config = plx.configs.ExperimentConfig.read_configs(config) return plx.experiments.create_experiment(experiment_config)
def create_experiment_json_fn(output_dir): """Creates an auto encoder on MNIST handwritten digits. inks: * [MNIST Dataset] http://yann.lecun.com/exdb/mnist/ """ dataset_dir = './data/mnist' mnist.prepare(dataset_dir) train_data_file = mnist.RECORD_FILE_NAME_FORMAT.format( dataset_dir, plx.Modes.TRAIN) eval_data_file = mnist.RECORD_FILE_NAME_FORMAT.format( dataset_dir, plx.Modes.EVAL) meta_data_file = mnist.MEAT_DATA_FILENAME_FORMAT.format(dataset_dir) config = { 'name': 'denoising_conv_autoencoder_mnsit', 'output_dir': output_dir, 'eval_every_n_steps': 100, 'train_steps_per_iteration': 1000, 'run_config': { 'save_checkpoints_steps': 1000 }, 'train_input_data_config': { 'pipeline_config': { 'module': 'TFRecordImagePipeline', 'batch_size': 64, 'num_epochs': 5, 'shuffle': True, 'dynamic_pad': False, 'params': { 'data_files': train_data_file, 'meta_data_file': meta_data_file }, 'definition': { 'image': [(plx.processing.image.Standardization, {}), (plx.layers.GaussianNoise, { 'scale': 0.5 })] } }, }, 'eval_input_data_config': { 'pipeline_config': { 'module': 'TFRecordImagePipeline', 'batch_size': 32, 'num_epochs': 1, 'shuffle': True, 'dynamic_pad': False, 'params': { 'data_files': eval_data_file, 'meta_data_file': meta_data_file }, 'definition': { 'image': [ (plx.processing.image.Standardization, {}), ] } }, }, 'estimator_config': { 'output_dir': output_dir }, 'model_config': { 'summaries': ['loss', 'image_input', 'image_result'], 'module': 'Generator', 'optimizer_config': { 'module': 'adadelta', 'learning_rate': 0.9 }, 'encoder_config': { 'definition': [ (plx.layers.Conv2d, { 'num_filter': 32, 'filter_size': 3, 'strides': 1, 'activation': 'relu', 'regularizer': 'l2_regularizer' }), (plx.layers.MaxPool2d, { 'kernel_size': 2 }), (plx.layers.Conv2d, { 'num_filter': 32, 'filter_size': 3, 'activation': 'relu', 'regularizer': 'l2_regularizer' }), (plx.layers.MaxPool2d, { 'kernel_size': 2 }), ] }, 'decoder_config': { 'definition': [ (plx.layers.Conv2d, { 'num_filter': 32, 'filter_size': 3, 'strides': 1, 'activation': 'relu', 'regularizer': 'l2_regularizer' }), (plx.layers.Upsample2d, { 'kernel_size': 2 }), (plx.layers.Conv2d, { 'num_filter': 32, 'filter_size': 3, 'activation': 'relu', 'regularizer': 'l2_regularizer' }), (plx.layers.Upsample2d, { 'kernel_size': 2 }), (plx.layers.Conv2d, { 'num_filter': 1, 'filter_size': 3, 'activation': 'relu', 'regularizer': 'l2_regularizer' }), ] } } } experiment_config = plx.configs.ExperimentConfig.read_configs(config) return plx.experiments.create_experiment(experiment_config)
def create_experiment_json_fn(output_dir): """Creates an experiment using deep residual network. References: * K. He, X. Zhang, S. Ren, and J. Sun. Deep Residual Learning for Image Recognition, 2015. * Y. LeCun, L. Bottou, Y. Bengio, and P. Haffner. "Gradient-based learning applied to document recognition." Proceedings of the IEEE, 86(11):2278-2324, November 1998. Links: * [Deep Residual Network](http://arxiv.org/pdf/1512.03385.pdf) * [MNIST Dataset](http://yann.lecun.com/exdb/mnist/) """ dataset_dir = './data/mnist' mnist.prepare(dataset_dir) train_data_file = mnist.RECORD_FILE_NAME_FORMAT.format(dataset_dir, plx.Modes.TRAIN) eval_data_file = mnist.RECORD_FILE_NAME_FORMAT.format(dataset_dir, plx.Modes.EVAL) meta_data_file = mnist.MEAT_DATA_FILENAME_FORMAT.format(dataset_dir) config = { 'name': 'residual_net_mnist', 'output_dir': output_dir, 'eval_every_n_steps': 10, 'train_steps_per_iteration': 100, 'run_config': {'save_checkpoints_steps': 100}, 'train_input_data_config': { 'pipeline_config': {'module': 'TFRecordImagePipeline', 'batch_size': 64, 'num_epochs': 1, 'shuffle': True, 'dynamic_pad': False, 'params': {'data_files': train_data_file, 'meta_data_file': meta_data_file}, }, }, 'eval_input_data_config': { 'pipeline_config': {'module': 'TFRecordImagePipeline', 'batch_size': 32, 'num_epochs': 1, 'shuffle': True, 'dynamic_pad': False, 'params': {'data_files': eval_data_file, 'meta_data_file': meta_data_file}}, }, 'estimator_config': {'output_dir': output_dir}, 'model_config': { 'summaries': 'all', 'module': 'Classifier', 'loss_config': {'module': 'softmax_cross_entropy'}, 'eval_metrics_config': [{'module': 'streaming_accuracy'}], 'optimizer_config': {'module': 'adam', 'learning_rate': 0.07, 'decay_type': 'exponential_decay', 'decay_rate': 0.2}, 'one_hot_encode': True, 'n_classes': 10, 'graph_config': { 'name': 'resnet', 'features': ['image'], 'definition': [ (plx.layers.Conv2d, {'num_filter': 64, 'filter_size': 3, 'activation': 'relu'}), (plx.layers.ResidualBottleneck, {'num_blocks': 3, 'bottleneck_size': 16, 'out_channels': 64}), (plx.layers.ResidualBottleneck, {'num_blocks': 1, 'bottleneck_size': 32, 'out_channels': 128, 'downsample': True}), (plx.layers.ResidualBottleneck, {'num_blocks': 2, 'bottleneck_size': 32, 'out_channels': 128}), (plx.layers.ResidualBottleneck, {'num_blocks': 1, 'bottleneck_size': 64, 'out_channels': 256, 'downsample': True}), (plx.layers.ResidualBottleneck, {'num_blocks': 2, 'bottleneck_size': 64, 'out_channels': 256}), (plx.layers.BatchNormalization, {}), (plx.layers.GlobalAvgPool, {}), (plx.layers.FullyConnected, {'num_units': 10}) ] } } } experiment_config = plx.configs.ExperimentConfig.read_configs(config) return plx.experiments.create_experiment(experiment_config)