def main():
    # Create experiment
    experiment.create(name="rotary_shakespeare", comment="rotary value", writers={'screen', 'labml'})
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(conf, {
        # No fixed positional embeddings
        'transformer.src_embed': 'no_pos',
        'transformer.tgt_embed': 'no_pos',
        # Encoder attention with rotary value embeddings
        'transformer.encoder_attn': 'rotary_value',
        # Alternative: plain RoPE attention
        # 'transformer.encoder_attn': 'rotary',
        # 'model': 'rotary_pe_transformer',
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',
        # Use a context size of $512$
        'seq_len': 512,
        # Train for $24$ epochs
        'epochs': 24,
        # Batch size $16$
        'batch_size': 16,
        # Switch between training and validation $4$ times
        # per epoch
        'inner_iterations': 4,
        # Model size
        'd_model': 128,
        'transformer.ffn.d_ff': 512,
        'transformer.n_heads': 4,
        'transformer.dropout': 0.0,
        # Use the Adam optimizer
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        'dataloader_shuffle_with_replacement': True
    })
    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})
    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()
def main():
    # Build the configurations object
    configs = Configs()
    # Register the experiment
    experiment.create(name='who_won')
    # Compute the configurations needed to run
    experiment.calculate_configs(configs, {}, ['run'])
    # Register the model for checkpointing
    experiment.add_pytorch_models({'model': configs.model})
    # Begin the experiment
    experiment.start()
    # Run
    configs.run()
def main():
    """
    ### Train StyleGAN2
    """
    # Register a new experiment
    experiment.create(name='stylegan2')
    # Build the configurations object
    conf = Configs()
    # Apply overrides on top of the defaults
    experiment.configs(conf,
                       {'device.cuda_device': 0,
                        'image_size': 64,
                        'log_generated_interval': 200})
    # Initialize models and data
    conf.init()
    # Register the three networks for saving and loading
    experiment.add_pytorch_models(mapping_network=conf.mapping_network,
                                  generator=conf.generator,
                                  discriminator=conf.discriminator)
    # Launch the experiment and train
    with experiment.start():
        conf.train()
def main():
    # Register the experiment
    experiment.create(name='ViT', comment='cifar10')
    # Build the configurations object
    configs = Configs()
    # Apply the overrides
    experiment.configs(configs, {
        # Adam optimizer with this learning rate
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        # Embedding size of the transformer
        'transformer.d_model': 512,
        # Number of epochs and the training batch size
        'epochs': 1000,
        'train_batch_size': 64,
        # Augmented CIFAR 10 images for training
        'train_dataset': 'cifar10_train_augmented',
        # Un-augmented CIFAR 10 images for validation
        'valid_dataset': 'cifar10_valid_no_augment',
    })
    # Register the model for checkpointing
    experiment.add_pytorch_models({'model': configs.model})
    # Launch the experiment and train
    with experiment.start():
        configs.run()
def main():
    # Register the experiment
    experiment.create(name="hyper_lstm", comment='')
    # Build the configurations object
    configs = Configs()
    # Apply the overrides
    experiment.configs(configs, {
        # Character level tokenizer on Tiny Shakespeare
        'tokenizer': 'character',
        'text': 'tiny_shakespeare',
        # Adam optimizer
        'optimizer.learning_rate': 2.5e-4,
        'optimizer.optimizer': 'Adam',
        # Sampling prompt, no separator
        'prompt': 'It is',
        'prompt_separator': '',
        # HyperLSTM model
        'rnn_model': 'hyper_lstm',
        # Shuffled data loaders
        'train_loader': 'shuffled_train_loader',
        'valid_loader': 'shuffled_valid_loader',
        # Sequence length, epochs, batch size and train/valid switches
        'seq_len': 512,
        'epochs': 128,
        'batch_size': 2,
        'inner_iterations': 25,
    })
    # Register the models for checkpointing
    experiment.add_pytorch_models(get_modules(configs))
    # Launch the experiment
    with experiment.start():
        # `TrainValidConfigs.run`
        configs.run()
def main():
    # Register the experiment
    experiment.create(name="knn_lm", comment='', writers={'tensorboard', 'sqlite', 'screen'})
    # Build the configurations object
    configs = Configs()
    # Apply the overrides
    experiment.configs(configs, {
        # Character level tokenizer, blank prompt separator
        'tokenizer': 'character',
        'prompt_separator': '',
        # Sampling prompt and dataset
        'prompt': 'It is ',
        'text': 'tiny_shakespeare',
        # Sequence length, epochs, batch size and train/valid switches
        'seq_len': 1024,
        'epochs': 128,
        'batch_size': 6,
        'inner_iterations': 10,
        # Transformer sizes
        'transformer.d_model': 256,
        'transformer.d_ff': 1024,
        'transformer.n_heads': 8,
        'transformer.n_layers': 6,
    })
    # Needed to initialize the models
    configs.n_tokens = configs.text.n_tokens
    # Register the models for checkpointing
    experiment.add_pytorch_models(get_modules(configs))
    # Launch the experiment
    with experiment.start():
        # `TrainValidConfigs.run`
        configs.run()
def main():
    # Build the configurations object
    configs = Configs()
    # Register the experiment
    experiment.create(name='mnist_gan', comment='test')
    # Override the label smoothing value
    experiment.configs(configs, {'label_smoothing': 0.01})
    # Launch the experiment and train
    with experiment.start():
        configs.run()
def main():
    # Build the configurations object
    configs = Configs()
    # Register the experiment
    experiment.create(name='probabilities_fixed_cards')
    # Compute the configurations needed to run
    experiment.calculate_configs(configs, {}, ['run'])
    # Register the model for checkpointing
    experiment.add_pytorch_models({'model': configs.model})
    # Begin the experiment
    experiment.start()
    # Run
    configs.run()
def main(local_rank, rank, world_size, uuid, init_method: str = 'tcp://localhost:23456'):
    """
    Entry point for a single distributed-data-parallel worker.

    `local_rank` selects the CUDA device on this machine, `rank`/`world_size`
    identify this process within the whole group, `uuid` names the shared
    experiment run, and `init_method` is the rendezvous address for the group.
    """
    # Join the process group first — this must happen before any experiment
    # setup so all workers rendezvous on the same group.
    # NOTE(review): 'gloo' backend with a CUDA device per rank — presumably
    # intentional for this setup; confirm 'nccl' wasn't intended.
    with monit.section('Distributed'):
        torch.distributed.init_process_group(
            "gloo",
            timeout=datetime.timedelta(seconds=30),
            init_method=init_method,
            rank=rank,
            world_size=world_size)

    # Build the configurations object
    conf = Configs()
    # All workers share the same run via the common `uuid`
    experiment.create(uuid=uuid, name="source_code_ddp", comment='lstm model')
    # Tell the experiment framework about this worker's rank
    experiment.distributed(local_rank, world_size)
    # Override configurations; each worker pins its own CUDA device
    experiment.configs(
        conf,
        {
            'model': 'transformer_model',
            'n_layers': 6,
            'batch_size': 12,
            'epochs': 32,
            'optimizer.optimizer': 'Noam',
            'optimizer.learning_rate': 1.0,
            'device.cuda_device': local_rank,
            'seq_len': 512,
            'train_loader': 'shuffled_train_loader',
            'valid_loader': 'shuffled_valid_loader'
        })
    # Save/load the DDP-wrapped model, not the bare module
    experiment.add_pytorch_models(model=conf.ddp_model)
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()
def main(): conf = Configs() # Assign one of transformer_mode, lstm_model, or rhn_model experiment.create(name="source_code", comment='bpe') experiment.configs( conf, { # 'model': 'transformer_model', 'model': 'transformer_xl_model', 'n_layers': 6, 'epochs': 32, 'optimizer.optimizer': 'AdamW', 'optimizer.learning_rate': 1.25e-4, 'device.cuda_device': 0, 'is_token_by_token': True, 'state_updater': 'transformer_memory', 'mem_len': 256, 'text.is_shuffle': False, 'text.tokenizer': 'bpe', 'text.batch_size': 12, 'text.seq_len': 256, # # 'inner_iterations': 10, # 'text.truncate_data': 100_000, }) experiment.add_pytorch_models(model=conf.model) with experiment.start(): conf.run()
def main(run_uuid: str, checkpoint: int):
    """
    Train a small model with distillation
    """
    # Restore the pre-trained large model from the given run and checkpoint
    teacher = get_saved_model(run_uuid, checkpoint)
    # Register the experiment
    experiment.create(name='distillation', comment='cifar10')
    # Build the configurations object
    configs = Configs()
    # Hand the loaded teacher over to the configs
    configs.large = teacher
    # Apply the overrides
    experiment.configs(configs, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        'model': '_small_student_model',
    })
    # Register the student model for checkpointing
    experiment.add_pytorch_models({'model': configs.model})
    # Do not resume from any previous run
    experiment.load(None, None)
    # Launch the experiment and train
    with experiment.start():
        configs.run()
def main():
    # Register the experiment
    experiment.create(name='diffuse', writers={'screen', 'comet'})
    # Build the configurations object
    conf = Configs()
    # Apply overrides; the trailing comments show the MNIST alternatives
    experiment.configs(conf, {
        'dataset': 'CelebA',  # 'MNIST'
        'image_channels': 3,  # 1,
        'epochs': 100,  # 5,
    })
    # Initialize models and data
    conf.init()
    # Register the noise-prediction model for checkpointing
    experiment.add_pytorch_models({'eps_model': conf.eps_model})
    # Launch the experiment and train
    with experiment.start():
        conf.run()
def main(local_rank, rank, world_size, uuid, init_method: str = 'tcp://localhost:23456'):
    """
    Entry point for a single distributed MNIST worker.

    `local_rank` selects the CUDA device on this machine, `rank`/`world_size`
    identify this process within the group, `uuid` names the shared run, and
    `init_method` is the rendezvous address for the process group.
    """
    # Join the process group first — all workers must rendezvous before any
    # experiment setup happens.
    # NOTE(review): 'gloo' backend with a per-rank CUDA device — confirm
    # 'nccl' wasn't intended for GPU training.
    with monit.section('Distributed'):
        torch.distributed.init_process_group(
            "gloo",
            timeout=datetime.timedelta(seconds=30),
            init_method=init_method,
            rank=rank,
            world_size=world_size)
    # Build the configurations object
    conf = Configs()
    # All workers share the same run via the common `uuid`
    experiment.create(uuid=uuid, name='mnist ddp')
    # Tell the experiment framework about this worker's rank
    experiment.distributed(local_rank, world_size)
    # Override configurations; each worker pins its own CUDA device
    experiment.configs(
        conf,
        {
            'optimizer.optimizer': 'Adam',
            'optimizer.learning_rate': 1e-4,
            'model': 'ddp_model',
            'device.cuda_device': local_rank
        })
    # Seed the random number generators
    conf.set_seed.set()
    # Register the model for checkpointing
    experiment.add_pytorch_models(dict(model=conf.model))
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()
def main():
    # Register the experiment
    experiment.create(name="feedback_transformer")
    # Build the configurations object
    configs = Configs()
    # Apply the overrides
    experiment.configs(configs, {
        # Character level tokenizer on Tiny Shakespeare
        'tokenizer': 'character',
        'text': 'tiny_shakespeare',
        # Noam optimizer
        'optimizer.learning_rate': 1.0,
        'optimizer.optimizer': 'Noam',
        # Sampling prompt, no separator
        'prompt': 'It is',
        'prompt_separator': '',
        # Use `feedback_transformer` for original feedback transformer
        'model': 'feedback_transformer_kv',
        # Shuffled data loaders
        'train_loader': 'shuffled_train_loader',
        'valid_loader': 'shuffled_valid_loader',
        # Sequence length, epochs, batch size and train/valid switches
        'seq_len': 128,
        'epochs': 128,
        'batch_size': 64,
        'inner_iterations': 25,
    })
    # Register the models for checkpointing
    experiment.add_pytorch_models(get_modules(configs))
    # Launch the experiment
    with experiment.start():
        # Run the training loop
        configs.run()
def main():
    """
    ### Run the experiment
    """
    # Register the experiment
    experiment.create(name="transformer_xl", comment='')
    # Build the configurations object
    configs = Configs()
    # Apply the overrides
    experiment.configs(configs, {
        # Character level tokenizer on Tiny Shakespeare
        'tokenizer': 'character',
        'text': 'tiny_shakespeare',
        # Noam optimizer
        'optimizer.learning_rate': 1.,
        'optimizer.optimizer': 'Noam',
        # Sampling prompt, no separator
        'prompt': 'I ',
        'prompt_separator': '',
        # Sequential loaders so transformer memory lines up across batches
        'train_loader': 'sequential_train_loader',
        'valid_loader': 'sequential_valid_loader',
        # Short sequences with a longer memory
        'seq_len': 2,
        'mem_len': 32,
        # Epochs, batch size and train/valid switches
        'epochs': 128,
        'batch_size': 32,
        'inner_iterations': 25,
    })
    # Register the model for checkpointing
    experiment.add_pytorch_models({'model': configs.model})
    # Launch the experiment
    with experiment.start():
        # `TrainValidConfigs.run`
        configs.run()
def main():
    # Build the configurations object
    configs = Configs()
    # Register the experiment
    experiment.create(name='mnist_latest')
    # Pick the Adam optimizer
    configs.optimizer = 'adam_optimizer'
    # Compute the configurations needed for seeding and running
    experiment.calculate_configs(configs, {}, ['set_seed', 'run'])
    # Register the model for checkpointing
    experiment.add_pytorch_models({'model': configs.model})
    # Begin the experiment
    experiment.start()
    # Run
    configs.run()
def main():
    # Register the experiment
    experiment.create(name='Test')
    # Log a constant loss once per second for 400 global steps
    with experiment.start():
        for _ in range(400):
            tracker.add_global_step()
            time.sleep(1)
            tracker.save(loss=1.)
def main():
    # Build the configurations object
    configs = Configs()
    # Register the experiment, logging to sqlite
    experiment.create(name='cifar_10', writers={'sqlite'})
    # Pick the Adam optimizer
    configs.optimizer = 'adam_optimizer'
    # Compute the configurations needed for seeding and running
    experiment.calculate_configs(configs, {}, ['set_seed', 'run'])
    # Register the model for checkpointing
    experiment.add_pytorch_models({'model': configs.model})
    # Begin the experiment
    experiment.start()
    # Run
    configs.run()
def main():
    # Register the experiment
    experiment.create(name='test_schedule', writers={'screen', 'web_api'})
    # A learning rate that can be adjusted dynamically while the run is live
    lr = DynamicSchedule(0.01, (0, 1))
    # Register it as a configuration
    experiment.configs({'lr': lr})
    # Log the current value once a second for 100 loop iterations
    with experiment.start():
        for _ in monit.loop(100):
            tracker.save('hp.lr', lr())
            time.sleep(1)
def main():
    # Build the configurations object
    configs = Configs()
    # Register the experiment
    experiment.create(name='mnist_labml_helpers')
    # Use the Adam optimizer
    experiment.configs(configs, {'optimizer.optimizer': 'Adam'})
    # Seed the random number generators
    configs.set_seed.set()
    # Register the model for checkpointing
    experiment.add_pytorch_models({'model': configs.model})
    # Launch the experiment and train
    with experiment.start():
        configs.run()
def main():
    # Register the experiment; screen output is left to Lightning
    experiment.create(name='mnist_lit_lightening', disable_screen=True)
    # The Lightning module to train
    lit_model = LitMNIST()
    # A Lightning trainer that logs through labml
    trainer = pl.Trainer(gpus=1,
                         max_epochs=3,
                         progress_bar_refresh_rate=20,
                         logger=LabMLLightningLogger())
    # Launch the experiment and fit
    with experiment.start():
        trainer.fit(lit_model)
def main():
    # Build the configurations object
    configs = Configs()
    # Register the experiment
    experiment.create(name='mnist_dcgan')
    # CNN discriminator and generator, with label smoothing
    experiment.configs(configs,
                       {'discriminator': 'cnn',
                        'generator': 'cnn',
                        'label_smoothing': 0.01})
    # Launch the experiment and train
    with experiment.start():
        configs.run()
def main():
    # Register the experiment
    experiment.create(name="rotary_pe_transformer")
    # Build the configurations object
    configs = Configs()
    # Apply the overrides
    experiment.configs(configs, {
        'device.cuda_device': 1,
        # No fixed positional embeddings
        'transformer.src_embed': 'no_pos',
        'transformer.tgt_embed': 'no_pos',
        # Encoder attention with RoPE
        'transformer.encoder_attn': 'rotary',
        # 'model': 'rotary_pe_transformer',
        # Character level tokenizer
        'tokenizer': 'character',
        # Blank prompt separator, sampling prompt
        'prompt_separator': '',
        'prompt': 'It is ',
        # Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',
        # Context size of $256$
        'seq_len': 256,
        # $128$ epochs, batch size $32$
        'epochs': 128,
        'batch_size': 32,
        # Switch between training and validation $10$ times per epoch
        'inner_iterations': 10,
        # Model size
        'd_model': 128,
        'transformer.ffn.d_ff': 256,
        # Noam optimizer
        'optimizer.optimizer': 'Noam',
        'optimizer.learning_rate': 1.,
    })
    # Register the model for checkpointing
    experiment.add_pytorch_models({'model': configs.model})
    # Launch the experiment
    with experiment.start():
        # Run training
        configs.run()
def main():
    # Build the configurations object
    configs = Configs()
    # Register the experiment, logging to sqlite
    experiment.create(name='sklearn', writers={'sqlite'})
    # Compute all configurations
    experiment.calculate_configs(configs)
    # Register the sklearn model for saving
    experiment.add_sklearn_models({'model': configs.model})
    # Begin the experiment
    experiment.start()
    # Fit
    configs.run()
    # Persist the trained model
    experiment.save_checkpoint()
def main():
    # Build the configurations object
    configs = Configs()
    # Register the experiment, logging to sqlite
    experiment.create(name='mnist_configs', writers={'sqlite'})
    # Pick the SGD optimizer
    configs.optimizer = 'sgd_optimizer'
    # Compute the configurations needed for seeding and looping
    experiment.calculate_configs(configs, {}, ['set_seed', 'loop'])
    # Register the model for checkpointing
    experiment.add_pytorch_models({'model': configs.model})
    # Begin the experiment
    experiment.start()
    # Run the loop
    configs.loop()
def main():
    # Register the experiment
    experiment.create(name='mnist_batch_norm')
    # Build the configurations object
    configs = MNISTConfigs()
    # Use the Adam optimizer
    experiment.configs(configs, {'optimizer.optimizer': 'Adam'})
    # Launch the experiment and train
    with experiment.start():
        configs.run()
def search(conf: Configs):
    # Zero the global step counter before this trial starts
    tracker.set_global_step(0)
    # Register an experiment run for this hyper-parameter trial
    experiment.create(name='mnist_hyperparam_tuning')
    # Compute the configurations needed for seeding and running
    experiment.calculate_configs(conf, {}, ['set_seed', 'run'])
    # Register the model for checkpointing
    experiment.add_pytorch_models({'model': conf.model})
    # Begin the experiment
    experiment.start()
    # Train
    conf.run()
    # Clear tracker state so the next trial starts fresh
    tracker.reset()
def main():
    # Build the configurations object
    configs = Configs()
    # Register the experiment
    experiment.create(name='mnist_latest')
    # Run on GPU 0 with the Adam optimizer
    experiment.configs(configs,
                       {'device.cuda_device': 0,
                        'optimizer.optimizer': 'Adam'})
    # Register the model for checkpointing
    experiment.add_pytorch_models({'model': configs.model})
    # Launch the experiment and train
    with experiment.start():
        configs.run()
def main():
    # Register the experiment
    experiment.create(name='dqn')
    # Build the trainer
    trainer = Trainer()
    # Launch the experiment and monitor training
    with experiment.start():
        trainer.run_training_loop()
        # Shut down the worker processes
        trainer.destroy()
def main():
    # Build the configurations object
    configs = Configs()
    # Register the experiment
    experiment.create(name='cifar_10')
    # Adam with a learning rate of 1e-4
    experiment.configs(configs,
                       {'optimizer.optimizer': 'Adam',
                        'optimizer.learning_rate': 1e-4})
    # Seed the random number generators
    configs.set_seed.set()
    # Register the model for checkpointing
    experiment.add_pytorch_models({'model': configs.model})
    # Launch the experiment and train
    with experiment.start():
        configs.run()