def main():
    """Train the MNIST model using the Lasagne framework configuration."""
    net = MnistModel()
    opt = Optimizer()
    source = MnistDataSource()

    # Load task/framework-specific hyper-parameters into cfg.prm.
    config = TrainConfig()
    config.load(net, opt, source, task_name="mnist", framework_name='Lasagne')
    params = config.prm

    checkpointer = TrainSaver(
        params['work_dir'],
        params['project_name'],
        params['model_filename_prefix'],
        source,
        task_name="mnist",
        suffix="_ls")

    worker = Trainer(
        model=net,
        optimizer=opt,
        data_source=source,
        saver=checkpointer)
    worker.train(
        num_epoch=params['max_num_epoch'],
        epoch_tail=params['min_num_epoch'])
def main():
    """Train the MNIST model using the Gluon framework configuration."""
    net = MnistModel()
    opt = Optimizer()
    source = MnistDataSource()

    # Load task/framework-specific hyper-parameters into cfg.prm.
    config = TrainConfig()
    config.load(net, opt, source, task_name="mnist", framework_name='Gluon')
    params = config.prm

    checkpointer = TrainSaver(
        params['work_dir'],
        params['project_name'],
        params['model_filename_prefix'],
        source,
        task_name="mnist",
        suffix="_gl")

    # Only the first GPU is used even if several are configured.
    # NOTE(review): the sibling MXNet script builds one context per GPU id;
    # confirm whether multi-GPU was disabled here on purpose.
    ctx = mx.gpu(0) if params['gpus'] else mx.cpu()

    worker = Trainer(
        model=net,
        optimizer=opt,
        data_source=source,
        saver=checkpointer,
        ctx=ctx)
    worker.train(
        num_epoch=params['max_num_epoch'],
        epoch_tail=params['min_num_epoch'])
def main():
    """Export a trained MNIST checkpoint to an HDF5 file."""
    args = parse_args()
    net = MnistModel()
    # Checkpoint location is assembled from the CLI directory + file name.
    checkpoint = os.path.join(args.checkpoint_dir, args.file_name)
    Converter.export_to_h5(
        model=net,
        checkpoint_path=checkpoint,
        dst_filepath=args.dst_filepath)
def main():
    """Import MNIST model weights from an HDF5 file into a checkpoint."""
    args = parse_args()
    net = MnistModel()
    # Destination checkpoint is assembled from the CLI directory + file name.
    checkpoint = os.path.join(args.checkpoint_dir, args.file_name)
    Converter.import_from_h5(
        model=net,
        src_filepath=args.src_filepath,
        checkpoint_path=checkpoint)
def main():
    """Evaluate the MNIST model from a TensorFlow checkpoint."""
    args = parse_args(framework_name='TensorFlow')
    net = MnistModel()
    # Evaluation data is served without augmentation.
    source = MnistDataSource(use_augmentation=False)
    source.update_project_dirname(args.data_cache_dir)
    Estimator.estimate(
        model=net,
        data_source=source,
        checkpoint_path=os.path.join(args.checkpoint_dir, args.file_name))
def main():
    """Import HDF5 weights into an epoch-tagged MXNet checkpoint."""
    args = parse_args()
    net = MnistModel()
    # Conversion runs on non-augmented data.
    source = MnistDataSource(use_augmentation=False)
    source.update_project_dirname(args.data_cache_dir)
    # One compute context per requested GPU id; fall back to CPU if none.
    ctx = [mx.gpu(i) for i in args.gpus] if args.gpus else mx.cpu()
    Converter.import_from_h5(
        model=net,
        data_source=source,
        src_filepath=args.src_filepath,
        checkpoint_path=os.path.join(args.checkpoint_dir, args.prefix),
        checkpoint_epoch=args.epoch,
        ctx=ctx)
def main():
    """Run Bayesian hyper-parameter search for the MNIST model (TFLearn).

    Builds the model/optimizer/data-source triple, loads the task
    configuration, and hands everything to Trainer.hyper_train, which
    drives Bayesian optimization over the augmentation/training
    hyper-parameters and records results to 'mnist_hyper.csv'.
    """
    model = MnistModel()
    optimizer = Optimizer()
    data_source = MnistDataSource()

    cfg = TrainConfig()
    cfg.load(model, optimizer, data_source,
             task_name="mnist", framework_name='TFLearn')

    saver = TrainSaver(
        cfg.prm['work_dir'],
        cfg.prm['project_name'],
        cfg.prm['model_filename_prefix'],
        data_source=data_source,
        task_name="mnist",
        suffix="_tfl")

    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        data_source=data_source,
        saver=saver)

    # The previous plain trainer.train(...) invocation (with hard-coded
    # augmentation parameters) was dead commented-out code and has been
    # removed; hyper_train explores those parameters instead.
    trainer.hyper_train(
        num_epoch=cfg.prm['max_num_epoch'],
        epoch_tail=cfg.prm['min_num_epoch'],
        bo_num_iter=cfg.prm['bo_num_iter'],
        bo_kappa=cfg.prm['bo_kappa'],
        bo_min_rand_num=cfg.prm['bo_min_rand_num'],
        bo_results_filename='mnist_hyper.csv',
        synch_file_list=cfg.prm['synch_list'],
        sync_period=cfg.prm['sync_period'])
def main():
    """Run Bayesian hyper-parameter search for the MNIST model (MXNet)."""
    net = MnistModel()
    opt = Optimizer()
    source = MnistDataSource(use_augmentation=False)

    # Load task/framework-specific hyper-parameters into cfg.prm.
    config = TrainConfig()
    config.load(
        net, opt, source, task_name="mnist", framework_name='MXNet')
    params = config.prm

    checkpointer = TrainSaver(
        params['work_dir'],
        params['project_name'],
        params['model_filename_prefix'],
        source,
        task_name="mnist",
        suffix="_mx")

    # One compute context per configured GPU id; CPU when none are set.
    ctx = [mx.gpu(i) for i in params['gpus']] if params['gpus'] else mx.cpu()

    worker = Trainer(
        model=net,
        optimizer=opt,
        data_source=source,
        saver=checkpointer,
        ctx=ctx)
    worker.hyper_train(
        num_epoch=params['max_num_epoch'],
        epoch_tail=params['min_num_epoch'],
        bo_num_iter=params['bo_num_iter'],
        bo_kappa=params['bo_kappa'],
        bo_min_rand_num=params['bo_min_rand_num'],
        bo_results_filename='mnist_hyper.csv',
        synch_file_list=params['synch_list'],
        sync_period=params['sync_period'])
# NOTE(review): truncated, whitespace-mangled fragment. It opens with the tail
# of a helper (an os.path.join(...) argument whose enclosing call/def starts
# outside this view — the bare `return MnistModel` mid-line belongs to it) and
# ends inside a Trainer(...) call with the closing parenthesis missing.
# Left byte-identical; recover the original multi-line file before editing.
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')) import sys sys.path.append(examples_dir) from mnist_model import MnistModel return MnistModel if __name__ == '__main__': parser = ArgumentParser(add_help=False) parser.add_argument('--gpus', type=str, default=None) MnistModel = import_model() # give the module a chance to add own params # good practice to define LightningModule speficic params in the module parser = MnistModel.add_model_specific_args(parser) # parse params hparams = parser.parse_args() # init module model = MnistModel(hparams.batch_size, hparams.learning_rate) # most basic trainer, uses good defaults trainer = Trainer( max_epochs=hparams.max_nb_epochs, gpus=hparams.gpus, val_check_interval=0.2, logger=DAGsHubLogger( ), # This is the main point - use the DAGsHub logger! default_root_dir='lightning_logs',
from mnist_model import MnistModel

if __name__ == '__main__':
    # Instantiate the model and run its training routine with defaults.
    classifier = MnistModel()
    classifier.train()
# NOTE(review): truncated, whitespace-mangled fragment. It opens with a bare
# `return MnistModel` — the tail of an import_model() helper whose definition
# starts outside this view — so the code cannot be safely restructured here.
# Left byte-identical; recover the original multi-line file before editing.
return MnistModel if __name__ == '__main__': # Read parameters from a versioned file, which should also be a DVC dependency. # This is the purest use case hparams_from_file = read_hparams('params.yml') # OPTIONAL: # Allow some hyperparameters to be defined in the command line parser = ArgumentParser(add_help=False) parser.add_argument('--gpus', type=str, default=None, required=False) # Parse args from command line, overriding params from file hparams = parser.parse_args(namespace=hparams_from_file) MnistModel = import_model() # init module model = MnistModel(hparams) # most basic trainer, uses good defaults trainer = Trainer( max_nb_epochs=hparams.max_nb_epochs, gpus=hparams.gpus, val_check_interval=0.2, logger=DAGsHubLogger(should_log_hparams=False), # This is the main point - use the DAGsHub logger! default_save_path='lightning_logs', ) trainer.fit(model)
from mnist_model import MnistModel

if __name__ == '__main__':
    # Run generation then display — semantics of gen()/show() live in
    # MnistModel (presumably produce samples and visualize them; verify there).
    demo = MnistModel()
    demo.gen()
    demo.show()
# NOTE(review): truncated, whitespace-mangled fragment of a TensorFlow training
# script. In this collapsed form everything after the first '#' is one giant
# comment; in the original multi-line file these were imports, module constants
# (data_size/test_size/dim/lambd), data loading via MnistModel, and the start of
# a split_data(X, Y, dev_size, test_size) helper that is cut off mid-body
# (Y_test and the return statement are missing from this view).
# Left byte-identical; recover the original file before editing.
import tensorflow as tf # import pandas as pd import csv import numpy as np from mnist_model import MnistModel from tensorflow.python.framework import ops import matplotlib.pyplot as plt data_size = 42000 # max 42k test_size = 280 # max 28k dim = 784 lambd = 0.01 # learning_rate = 0.001 mm = MnistModel() train_dataset, train_labels = mm.load_csv_or_pickle("train.csv", "train.pickle", data_size, dim) def split_data(X, Y, dev_size, test_size): num_examples = X.shape[1] train_size = num_examples - test_size - dev_size permutation = list(np.random.permutation(num_examples)) shuffled_X = X[:, permutation] shuffled_Y = Y[:, permutation] X_train = shuffled_X[:, :train_size] Y_train = shuffled_Y[:, :train_size] X_dev = shuffled_X[:, train_size:train_size + dev_size] Y_dev = shuffled_Y[:, train_size:train_size + dev_size] X_test = shuffled_X[:, train_size + dev_size:]
# NOTE(review): truncated, whitespace-mangled fragment (near-duplicate of the
# fragment above it in this chunk). It opens with the tail of a helper (an
# os.path.join(...) argument whose enclosing call/def starts outside this view)
# and ends inside a Trainer(...) call with the closing parenthesis missing.
# Left byte-identical; recover the original multi-line file before editing.
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')) import sys sys.path.append(examples_dir) from mnist_model import MnistModel return MnistModel if __name__ == '__main__': parser = ArgumentParser(add_help=False) parser.add_argument('--gpus', type=str, default=None) MnistModel = import_model() # give the module a chance to add own params # good practice to define LightningModule speficic params in the module parser = MnistModel.add_model_specific_args(parser) # parse params hparams = parser.parse_args() # init module model = MnistModel(hparams) # most basic trainer, uses good defaults trainer = Trainer( max_nb_epochs=hparams.max_nb_epochs, gpus=hparams.gpus, val_check_interval=0.2, logger=DAGsHubLogger( ), # This is the main point - use the DAGsHub logger! default_save_path='lightning_logs',