Example #1
def load_experiment() -> Configs:
    conf = Configs()
    experiment.evaluate()

    # This will download a pretrained model checkpoint and some cached files.
    # It will download the archive as `saved_checkpoint.tar.gz` and extract it.
    #
    # If you have a locally trained model, load it directly with
    # run_uuid = 'RUN_UUID'
    # and for the latest checkpoint
    # checkpoint = None

    # run_uuid = 'a6cff3706ec411ebadd9bf753b33bae6'  # bpe
    # checkpoint = None
    run_uuid, checkpoint = experiment.load_bundle(
        lab.get_path() / 'saved_checkpoint.tar.gz',
        url='https://github.com/lab-ml/python_autocomplete/releases/download/0.0.5/bundle.tar.gz'
    )

    conf_dict = experiment.load_configs(run_uuid)
    conf_dict['text.is_load_data'] = False
    experiment.configs(conf, conf_dict)
    experiment.add_pytorch_models(get_modules(conf))
    experiment.load(run_uuid, checkpoint)

    experiment.start()

    return conf
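
Here `experiment.evaluate()` switches the experiment into evaluation mode, which is why `experiment.start()` is called without a `with` block. A minimal sketch of how the function above might be used (the caller is hypothetical; `load_experiment` and `Configs` are as defined above):

conf = load_experiment()
# The returned Configs holds the restored model; switch it to
# inference mode before sampling from it.
conf.model.eval()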
Example #2
def get_predictor():
    conf = Configs()
    experiment.evaluate()

    # This will download a pretrained model checkpoint and some cached files.
    # It will download the archive as `saved_checkpoint.tar.gz` and extract it.
    #
    # If you have a locally trained model, load it directly with
    # run_uuid = 'RUN_UUID'
    # and for the latest checkpoint
    # checkpoint = None
    run_uuid, checkpoint = experiment.load_bundle(
        lab.get_path() / 'saved_checkpoint.tar.gz',
        url='https://github.com/lab-ml/python_autocomplete/releases/download/0.0.4/transformer_checkpoint.tar.gz'
    )

    conf_dict = experiment.load_configs(run_uuid)
    experiment.configs(conf, conf_dict)
    experiment.add_pytorch_models(get_modules(conf))
    experiment.load(run_uuid, checkpoint)

    experiment.start()
    conf.model.eval()
    return Predictor(conf.model, cache('stoi', lambda: conf.text.stoi),
                     cache('itos', lambda: conf.text.itos))
Example #3
def main():
    # Create experiment
    experiment.create(name="hyper_lstm", comment='')
    # Create configs
    conf = Configs()
    # Load configurations
    experiment.configs(conf,
                       # A dictionary of configurations to override
                       {'tokenizer': 'character',
                        'text': 'tiny_shakespeare',
                        'optimizer.learning_rate': 2.5e-4,
                        'optimizer.optimizer': 'Adam',
                        'prompt': 'It is',
                        'prompt_separator': '',

                        'rnn_model': 'hyper_lstm',

                        'train_loader': 'shuffled_train_loader',
                        'valid_loader': 'shuffled_valid_loader',

                        'seq_len': 512,
                        'epochs': 128,
                        'batch_size': 2,
                        'inner_iterations': 25})

    # Set models for saving and loading
    experiment.add_pytorch_models(get_modules(conf))

    # Start the experiment
    with experiment.start():
        # `TrainValidConfigs.run`
        conf.run()
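
The dict passed to `experiment.configs` overrides items declared on the `Configs` class, and dotted keys such as 'optimizer.learning_rate' address nested config groups. A minimal, self-contained sketch of the same call shape (ToyConfigs and its fields are illustrative, not part of the example above):

from labml import experiment
from labml.configs import BaseConfigs


class ToyConfigs(BaseConfigs):
    # Declared defaults; anything not overridden keeps these values
    seq_len: int = 128
    batch_size: int = 32


def toy():
    conf = ToyConfigs()
    experiment.create(name='toy_overrides')
    # Override one declared default; 'batch_size' stays 32
    experiment.configs(conf, {'seq_len': 512})
    assert conf.seq_len == 512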
Example #4
def main():
    # Create experiment
    experiment.create(name="knn_lm", comment='', writers={'tensorboard', 'sqlite', 'screen'})
    # Create configs
    conf = Configs()
    # Load configurations
    experiment.configs(conf,
                       # A dictionary of configurations to override
                       {'tokenizer': 'character',
                        'prompt_separator': '',
                        'prompt': 'It is ',
                        'text': 'tiny_shakespeare',

                        'seq_len': 1024,
                        'epochs': 128,
                        'batch_size': 6,
                        'inner_iterations': 10,

                        # Transformer configurations
                        'transformer.d_model': 256,
                        'transformer.d_ff': 1024,
                        'transformer.n_heads': 8,
                        'transformer.n_layers': 6})

    # This is needed to initialize models
    conf.n_tokens = conf.text.n_tokens

    # Set models for saving and loading
    experiment.add_pytorch_models(get_modules(conf))

    # Start the experiment
    with experiment.start():
        # `TrainValidConfigs.run`
        conf.run()
Example #5
def main():
    # Create experiment
    experiment.create(name="feedback_transformer")
    # Create configs
    conf = Configs()
    # Load configurations
    experiment.configs(
        conf,
        # A dictionary of configurations to override
        {
            'tokenizer': 'character',
            'text': 'tiny_shakespeare',
            'optimizer.learning_rate': 1.0,
            'optimizer.optimizer': 'Noam',
            'prompt': 'It is',
            'prompt_separator': '',
            'train_loader': 'shuffled_train_loader',
            'valid_loader': 'shuffled_valid_loader',
            'seq_len': 64,
            'epochs': 128,
            'batch_size': 80,
            'inner_iterations': 25
        })

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    conf.init()
    # Start the experiment
    with experiment.start():
        conf.run()
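
A note on the Noam settings above: 'optimizer.learning_rate': 1.0 is not a raw step size. The Noam scheme from the original Transformer paper computes the rate as $d_{model}^{-0.5} \cdot \min(step^{-0.5}, step \cdot warmup^{-1.5})$, with the configured learning rate acting as a multiplier on that schedule, so values around $1$ are the norm.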
Example #6
def main():
    conf = Configs()
    experiment.create(name='mnist_labml_helpers')
    experiment.configs(conf, {'optimizer.optimizer': 'Adam'})
    conf.set_seed.set()
    experiment.add_pytorch_models(dict(model=conf.model))
    with experiment.start():
        conf.run()
Example #7
File: schedule.py Project: skiedra/labml
def main():
    experiment.create(name='test_schedule', writers={'screen', 'web_api'})
    lr = DynamicSchedule(0.01, (0, 1))
    experiment.configs({'lr': lr})
    with experiment.start():
        for epoch in monit.loop(100):
            tracker.save('hp.lr', lr())
            time.sleep(1)
Example #8
File: __init__.py Project: Hadryan/nn
def main():
    conf = Configs()
    experiment.create(name='mnist_dcgan')
    experiment.configs(conf,
                       {'discriminator': 'cnn',
                        'generator': 'cnn',
                        'label_smoothing': 0.01})
    with experiment.start():
        conf.run()
Example #9
def main():
    # Create experiment
    experiment.create(name="rotary_pe_transformer")
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(
        conf,
        {
            # No fixed positional embeddings
            'transformer.src_embed': 'no_pos',
            'transformer.tgt_embed': 'no_pos',

            # Encoder with RoPE
            'transformer.encoder_attn': 'rotary',

            #
            'model': 'rotary_pe_transformer',

            # Use character level tokenizer
            'tokenizer': 'character',
            # Prompt separator is blank
            'prompt_separator': '',
            # Starting prompt for sampling
            'prompt': 'It is ',
            # Use Tiny Shakespeare dataset
            'text': 'tiny_shakespeare',

            # Use a context size of $512$
            'seq_len': 512,
            # Train for $32$ epochs
            'epochs': 32,
            # Batch size $4$
            'batch_size': 4,
            # Switch between training and validation for $10$ times
            # per epoch
            'inner_iterations': 10,

            # Model size
            'd_model': 128,
            'transformer.ffn.d_ff': 512,
            'transformer.n_heads': 16,
            'transformer.dropout': 0.0,

            # Use [Noam optimizer](../../optimizers/noam.html)
            'optimizer.optimizer': 'Noam',
            'optimizer.learning_rate': 1.,
            'dataloader_shuffle_with_replacement': True
        })

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()
Example #10
File: mnist.py Project: xiaming9880/labml
def main():
    conf = Configs()
    experiment.create(name='mnist_latest')
    experiment.configs(conf, {
        'device.cuda_device': 0,
        'optimizer.optimizer': 'Adam'
    })
    experiment.add_pytorch_models(dict(model=conf.model))
    with experiment.start():
        conf.run()
Example #11
def main():
    conf = Configs()
    experiment.create(name='cifar_10')
    experiment.configs(conf,
                       {'optimizer.optimizer': 'Adam',
                        'optimizer.learning_rate': 1e-4})
    conf.set_seed.set()
    experiment.add_pytorch_models(dict(model=conf.model))
    with experiment.start():
        conf.run()
Example #12
def main():
    # Create experiment
    experiment.create(name='mnist_batch_norm')
    # Create configurations
    conf = MNISTConfigs()
    # Load configurations
    experiment.configs(conf, {'optimizer.optimizer': 'Adam'})
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()
Example #13
def main():
    experiment.create(name='test_dynamic_hp', writers={'screen', 'web_api'})
    conf = Configs()
    experiment.configs(conf)
    # Read the dynamic learning rate (a `FloatDynamicHyperParam`) declared in `Configs`
    lr = conf.lr
    with experiment.start():
        for epoch in monit.loop(100):
            tracker.save('hp.lr', lr())
            time.sleep(1)
Example #14
File: mnist.py Project: israrbacha/nn
def main():
    """
    Run the experiment
    """
    conf = Configs()
    experiment.create(name='mnist_latest')
    experiment.configs(conf, {'optimizer.optimizer': 'Adam',
                              'device.cuda_device': 1},
                       'run')
    with experiment.start():
        conf.run()
Example #15
def main():
    # Configurations
    configs = {
        'epochs': 10,
        'train_batch_size': 64,
        'valid_batch_size': 100,
        'use_cuda': True,
        'seed': 5,
        'train_log_interval': 10,
        'learning_rate': 0.01,
    }

    is_cuda = configs['use_cuda'] and torch.cuda.is_available()
    if is_cuda:
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")

    train_loader = torch.utils.data.DataLoader(
        RemoteDataset('mnist_train'),
        batch_size=configs['train_batch_size'],
        shuffle=True,
        num_workers=4)

    valid_loader = torch.utils.data.DataLoader(
        RemoteDataset('mnist_valid'),
        batch_size=configs['valid_batch_size'],
        shuffle=False,
        num_workers=4)

    model = Net().to(device)
    optimizer = optim.Adam(model.parameters(), lr=configs['learning_rate'])

    torch.manual_seed(configs['seed'])

    # ✨ Create the experiment
    experiment.create(name='mnist_labml_monit')

    # ✨ Save configurations
    experiment.configs(configs)

    # ✨ Set PyTorch models for checkpoint saving and loading
    experiment.add_pytorch_models(dict(model=model))

    # ✨ Start and monitor the experiment
    with experiment.start():
        for _ in monit.loop(range(1, configs['epochs'] + 1)):
            train(model, optimizer, train_loader, device,
                  configs['train_log_interval'])
            validate(model, valid_loader, device)
            logger.log()

    # save the model
    experiment.save_checkpoint()
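
As this example shows, `experiment.configs` also accepts a plain dict, in which case the values are simply recorded as hyperparameters of the run; no `Configs` class is involved. A stripped-down sketch of that flavour (names are illustrative):

from labml import experiment

experiment.create(name='toy_dict_configs')
# With a plain dict there is nothing to compute: the values are
# logged with the run and shown alongside its tracked metrics.
experiment.configs({'learning_rate': 0.01, 'epochs': 10})

Note also that the final `experiment.save_checkpoint()` writes a checkpoint for whatever was registered with `add_pytorch_models`.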
Example #16
def main():
    # Create experiment
    experiment.create(name="primer_ez")
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(
        conf,
        {
            # Use character level tokenizer
            'tokenizer': 'character',
            # Prompt separator is blank
            'prompt_separator': '',
            # Starting prompt for sampling
            'prompt': 'It is ',
            # Use Tiny Shakespeare dataset
            'text': 'tiny_shakespeare',

            # Use a context size of $256$
            'seq_len': 256,
            # Train for $128$ epochs
            'epochs': 128,
            # Batch size $32$
            'batch_size': 32,
            # Switch between training and validation for $10$ times
            # per epoch
            'inner_iterations': 10,

            # Model size
            'd_model': 512,
            'transformer.ffn.d_ff': 2048,

            # Use Adam optimizer
            'optimizer.optimizer': 'Adam',
            'optimizer.learning_rate': 2.5e-4,

            # ⭐️ Use [**squared ReLU**](index.html) activation in the feed forward network.
            #
            # *Replace this with `ReLU` for $ReLU$.*
            'transformer.ffn.activation': 'SquaredReLU',

            # ⭐️ Use [**Multi-DConv-Head Attention**](index.html) for encoder attention.
            #
            # *Replace this with `mha` for original multi-head attention.*
            'transformer.encoder_attn': 'MultiDConvHeadAttention',
        })

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()
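
String values such as 'SquaredReLU' and 'MultiDConvHeadAttention' select named options registered on the corresponding config item. A minimal sketch of how such an option is typically registered with labml's `option` decorator (ToyConfigs and the modules here are illustrative):

import torch
import torch.nn as nn

from labml.configs import BaseConfigs, option


class ToyConfigs(BaseConfigs):
    activation: nn.Module


@option(ToyConfigs.activation, 'ReLU')
def _relu(c: ToyConfigs):
    return nn.ReLU()


@option(ToyConfigs.activation, 'SquaredReLU')
def _squared_relu(c: ToyConfigs):
    # Stand-in for squared ReLU: relu(x) ** 2
    class SquaredReLU(nn.Module):
        def forward(self, x: torch.Tensor):
            return torch.relu(x) ** 2

    return SquaredReLU()

With this in place, `experiment.configs(conf, {'activation': 'SquaredReLU'})` would make `conf.activation` resolve to the second module.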
Example #17
def main():
    conf = Configs()
    experiment.create(name='mnist_gan', comment='test')
    experiment.configs(
        conf, {
            'generator_optimizer.learning_rate': 2.5e-4,
            'generator_optimizer.optimizer': 'Adam',
            'discriminator_optimizer.learning_rate': 2.5e-4,
            'discriminator_optimizer.optimizer': 'Adam'
        }, ['set_seed', 'main'])
    with experiment.start():
        conf.run()
Example #18
def main():
    conf = Configs()
    experiment.create(name='configs')
    experiment.configs(conf, {'optimizer': 'sgd_optimizer'})

    torch.manual_seed(conf.seed)

    with experiment.start():
        conf.run()

    # save the model
    experiment.save_checkpoint()
Example #19
def main():
    conf = Configs()
    conf.inner_iterations = 10
    experiment.create(name='mnist_ada_belief')
    experiment.configs(conf, {'inner_iterations': 10,
                              # Specify the optimizer
                              'optimizer.optimizer': 'Adam',
                              'optimizer.learning_rate': 1.5e-4})
    conf.set_seed.set()
    experiment.add_pytorch_models(dict(model=conf.model))
    with experiment.start():
        conf.run()
Example #20
def main():
    # Create the experiment
    experiment.create(name='ppo')
    # Configurations
    configs = {
        # Number of updates
        'updates': 10000,
        # ⚙️ Number of epochs to train the model with sampled data.
        # You can change this while the experiment is running.
        # [![Example](https://img.shields.io/badge/example-hyperparams-brightgreen)](https://app.labml.ai/run/6eff28a0910e11eb9b008db315936e2f/hyper_params)
        'epochs': IntDynamicHyperParam(8),
        # Number of worker processes
        'n_workers': 8,
        # Number of steps to run on each process for a single update
        'worker_steps': 128,
        # Number of mini batches
        'batches': 4,
        # ⚙️ Value loss coefficient.
        # You can change this while the experiment is running.
        # [![Example](https://img.shields.io/badge/example-hyperparams-brightgreen)](https://app.labml.ai/run/6eff28a0910e11eb9b008db315936e2f/hyper_params)
        'value_loss_coef': FloatDynamicHyperParam(0.5),
        # ⚙️ Entropy bonus coefficient.
        # You can change this while the experiment is running.
        # [![Example](https://img.shields.io/badge/example-hyperparams-brightgreen)](https://app.labml.ai/run/6eff28a0910e11eb9b008db315936e2f/hyper_params)
        'entropy_bonus_coef': FloatDynamicHyperParam(0.01),
        # ⚙️ Clip range.
        # You can change this while the experiment is running.
        # [![Example](https://img.shields.io/badge/example-hyperparams-brightgreen)](https://app.labml.ai/run/6eff28a0910e11eb9b008db315936e2f/hyper_params)
        'clip_range': FloatDynamicHyperParam(0.1),
        # ⚙️ Learning rate.
        'learning_rate': FloatDynamicHyperParam(1e-3, (0, 1e-3)),
    }

    experiment.configs(configs)

    # Initialize the trainer
    m = Trainer(
        updates=configs['updates'],
        epochs=configs['epochs'],
        n_workers=configs['n_workers'],
        worker_steps=configs['worker_steps'],
        batches=configs['batches'],
        value_loss_coef=configs['value_loss_coef'],
        entropy_bonus_coef=configs['entropy_bonus_coef'],
        clip_range=configs['clip_range'],
        learning_rate=configs['learning_rate'],
    )

    # Run and monitor the experiment
    with experiment.start():
        m.run_training_loop()
    # Stop the workers
    m.destroy()
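
The ⚙️ entries are live: a dynamic hyperparameter is callable, and code reads its current value by calling it, so edits made from the labml app take effect on the next read. A minimal sketch (assuming the import path used by the labml samples):

from labml.configs import FloatDynamicHyperParam

lr = FloatDynamicHyperParam(1e-3, (0, 1e-3))
# Inside a training loop you would call lr() on each update;
# the returned float reflects any change made while the run is live.
current = lr()  # 0.001 until edited from the app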
Example #21
File: experiment.py Project: wx-b/nn
def main():
    # Create experiment
    experiment.create(name='cifar10', comment='group norm')
    # Create configurations
    conf = Configs()
    # Load configurations
    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
    })
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()
Example #22
def main():
    conf = Configs()
    experiment.create(name="source_code_eval", comment='lstm model')

    # Replace this with your training experiment UUID
    conf_dict = experiment.load_configs('6f10a292e77211ea89d69979079dc3d6')
    experiment.configs(conf, conf_dict, 'run')
    experiment.add_pytorch_models(get_modules(conf))
    experiment.load('6f10a292e77211ea89d69979079dc3d6')

    experiment.start()
    evaluator = Evaluator(conf.model, conf.text, conf.text.valid, False)
    evaluator.eval()
Example #23
File: experiment.py Project: wx-b/nn
def main():
    """
    Run the experiment
    """
    experiment.create(name='ponder_net')

    conf = Configs()
    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 0.0003,
    })

    with experiment.start():
        conf.run()
Example #24
File: mnist.py Project: zeta1999/nn
def main():
    """
    Run the experiment
    """
    conf = Configs()
    experiment.create(name='capsule_network_mnist')
    experiment.configs(
        conf, {
            'optimizer.optimizer': 'Adam',
            'optimizer.learning_rate': 1e-3,
            'device.cuda_device': 1
        })
    with experiment.start():
        conf.run()
Example #25
File: mnist.py Project: Sandy4321/nn-1
def main():
    """
    Run the experiment
    """
    experiment.create(name='capsule_network_mnist')
    conf = Configs()
    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 1e-3
    })

    experiment.add_pytorch_models({'model': conf.model})

    with experiment.start():
        conf.run()
Example #26
def main():
    # Create experiment
    experiment.create(name="glu_variants")
    # Create configs
    configs = Configs()
    # Load configurations
    experiment.configs(dataclasses.asdict(configs))

    trainer = Trainer(configs)
    experiment.add_pytorch_models({'model': trainer.model})

    # Start the experiment
    with experiment.start():
        # `TrainValidConfigs.run`
        trainer.train()
Example #27
def get_predictor():
    conf = Configs()
    experiment.evaluate()

    # Replace this with your training experiment UUID
    run_uuid = '39b03a1e454011ebbaff2b26e3148b3d'

    conf_dict = experiment.load_configs(run_uuid)
    experiment.configs(conf, conf_dict)
    experiment.add_pytorch_models(get_modules(conf))
    experiment.load(run_uuid)

    experiment.start()
    conf.model.eval()
    return Predictor(conf.model, cache('stoi', lambda: conf.text.stoi), cache('itos', lambda: conf.text.itos))
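
The `cache` helper used here memoizes a value to disk under the lab's cache directory, so the vocabulary maps (`stoi`, `itos`) are computed once and reloaded on later runs. A minimal sketch (assuming `cache` is `labml.utils.cache.cache`):

from labml.utils.cache import cache

# The loader runs only if 'toy_vocab' has not been cached yet;
# afterwards the saved result is returned directly.
vocab = cache('toy_vocab', lambda: {'a': 0, 'b': 1})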
Example #28
File: experiment.py Project: wx-b/nn
def main():
    # Create experiment
    experiment.create(name='cifar10', comment='weight standardization')
    # Create configurations
    conf = CIFAR10Configs()
    # Load configurations
    experiment.configs(
        conf, {
            'optimizer.optimizer': 'Adam',
            'optimizer.learning_rate': 2.5e-4,
            'train_batch_size': 64,
        })
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()
Example #29
def main():
    conf = Configs()
    conf.n_layers = 6
    conf.seq_len = 512
    conf.epochs = 1024
    conf.model = 'transformer_model'
    experiment.create(name="source_code",
                      comment='transformer model')
    experiment.configs(conf, {
        'optimizer.optimizer': 'Noam',
        'device.cuda_device': 0
    }, 'run')
    experiment.add_pytorch_models(get_modules(conf))
    # experiment.load('d5ba7f56d88911eaa6629b54a83956dc')
    experiment.start()
    conf.run()
Example #30
def main():
    experiment.create()
    conf = Configs()
    conf.activation = 'relu'
    conf.dropout = 0.1
    experiment.configs(conf,
                       {'conv_sizes': [(128, 2), (256, 4)],
                        'optimizer.learning_rate': 1e-4,
                        'optimizer.optimizer': 'Adam'})

    with experiment.start():
        with monit.section('Initialize'):
            conf.initialize()
        with tracker.namespace('valid'):
            conf.valid_dataset.save_artifacts()
        conf.run()